Mirror of https://github.com/prowler-cloud/prowler.git (synced 2026-05-16 09:12:47 +00:00)

Compare commits (79 commits)
SHA1 list (the author and date columns were empty in the mirrored view):

95f5d6045e, d33703b437, e9c96b0b11, 2bbf1e1017, db20c1e53a, 44c6fbbf58, e2d4076c79, 53e381a9fc,
4d7fb46d12, d775b6971b, c289ddacf2, 3fd9c51086, de01087246, fe42bb47f7, c56bd519bb, 79b29d9437,
82eecec277, ceacd077d2, 5a0fb13ece, 78439b4c0c, 356ccdfc35, 06f94f884f, b8836c6404, ac79b86810,
793c2ae947, cdcc5c6e35, 51db81aa5c, a51a185f49, 90453fd07e, d740bf84c3, f3cf824950, d13d2677ea,
b076c98ba1, d071dea7f7, d9782c7b8a, f85450d0b5, b129326ed6, eaf0d06b63, 019dec744b, 87f3e0a138,
8e3c856a14, 12c2439196, deb1e0ff34, 808e8297b0, 738ce56955, 190fd0b93c, ca6df26918, bcfeb97e4a,
0234957907, 8713b74204, cbaddad358, 2379544425, 29fefba62e, 098382117e, d816d73174, 30eb78c293,
a671b092ee, 0edf199282, 2478555f0e, b07080245d, 2ebf217bb0, bb527024d9, e897978c3e, 00f1c02532,
348d1a2fda, f1df8ba458, b5ea418933, 734fa5a4e6, 08f6d4b69b, 29d3bb9f9a, 4d217e642b, bd56e03991,
0b6aa0ddcd, 4f3496194d, d09a680aaa, 56d7431d56, abae5f1626, 7d0e94eecb, 23b65c7728
@@ -145,7 +145,7 @@ jobs:
        working-directory: ./api
        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
        run: |
-         poetry run safety check --ignore 70612,66963
+         poetry run safety check --ignore 70612,66963,74429

      - name: Vulture
        working-directory: ./api
@@ -0,0 +1,34 @@
name: Prowler - Merged Pull Request

on:
  pull_request_target:
    branches: ['master']
    types: ['closed']

jobs:
  trigger-cloud-pull-request:
    name: Trigger Cloud Pull Request
    if: github.event.pull_request.merged == true && github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Set short git commit SHA
        id: vars
        run: |
          shortSha=$(git rev-parse --short ${{ github.sha }})
          echo "SHORT_SHA=${shortSha}" >> $GITHUB_ENV

      - name: Trigger pull request
        uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.CLOUD_DISPATCH }}
          event-type: prowler-pull-request-merged
          client-payload: '{
            "PROWLER_COMMIT_SHA": "${{ github.sha }}",
            "PROWLER_COMMIT_SHORT_SHA": "${{ env.SHORT_SHA }}",
            "PROWLER_PR_TITLE": "${{ github.event.pull_request.title }}",
            "PROWLER_PR_LABELS": ${{ toJson(github.event.pull_request.labels.*.name) }},
            "PROWLER_PR_BODY": ${{ toJson(github.event.pull_request.body) }}
          }'
@@ -0,0 +1,94 @@
name: SDK - Bump Version

on:
  release:
    types: [published]


env:
  PROWLER_VERSION: ${{ github.event.release.tag_name }}
  BASE_BRANCH: master

jobs:
  bump-version:
    name: Bump Version
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Get Prowler version
        shell: bash
        run: |
          if [[ $PROWLER_VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
            MAJOR_VERSION=${BASH_REMATCH[1]}
            MINOR_VERSION=${BASH_REMATCH[2]}
            FIX_VERSION=${BASH_REMATCH[3]}

            if (( MAJOR_VERSION == 5 )); then
              if (( FIX_VERSION == 0 )); then
                echo "Minor Release: $PROWLER_VERSION"

                BUMP_VERSION_TO=${MAJOR_VERSION}.$((MINOR_VERSION + 1)).${FIX_VERSION}
                echo "BUMP_VERSION_TO=${BUMP_VERSION_TO}" >> "${GITHUB_ENV}"

                TARGET_BRANCH=${BASE_BRANCH}
                echo "TARGET_BRANCH=${TARGET_BRANCH}" >> "${GITHUB_ENV}"

                echo "Bumping to next minor version: ${BUMP_VERSION_TO} in branch ${TARGET_BRANCH}"
              else
                echo "Patch Release: $PROWLER_VERSION"

                BUMP_VERSION_TO=${MAJOR_VERSION}.${MINOR_VERSION}.$((FIX_VERSION + 1))
                echo "BUMP_VERSION_TO=${BUMP_VERSION_TO}" >> "${GITHUB_ENV}"

                TARGET_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
                echo "TARGET_BRANCH=${TARGET_BRANCH}" >> "${GITHUB_ENV}"

                echo "Bumping to next patch version: ${BUMP_VERSION_TO} in branch ${TARGET_BRANCH}"
              fi
            else
              echo "Releasing another Prowler major version, aborting..."
              exit 1
            fi
          else
            echo "Invalid version syntax: '$PROWLER_VERSION' (must be N.N.N)" >&2
            exit 1
          fi

      - name: Bump versions in files
        run: |
          echo "Using PROWLER_VERSION=$PROWLER_VERSION"
          echo "Using BUMP_VERSION_TO=$BUMP_VERSION_TO"

          set -e

          echo "Bumping version in pyproject.toml ..."
          sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${BUMP_VERSION_TO}\"|" pyproject.toml

          echo "Bumping version in prowler/config/config.py ..."
          sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${BUMP_VERSION_TO}\"|" prowler/config/config.py

          echo "Bumping version in .env ..."
          sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${BUMP_VERSION_TO}|" .env

          git --no-pager diff


      - name: Create Pull Request
        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          base: ${{ env.TARGET_BRANCH }}
          commit-message: "chore(release): Bump version to v${{ env.BUMP_VERSION_TO }}"
          branch: "version-bump-to-v${{ env.BUMP_VERSION_TO }}"
          title: "chore(release): Bump version to v${{ env.BUMP_VERSION_TO }}"
          body: |
            ### Description

            Bump Prowler version to v${{ env.BUMP_VERSION_TO }}

            ### License

            By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
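
The branching rule above is easy to get wrong, so here is a minimal standalone sketch of the same bump logic in Python, useful for unit-testing the rule outside CI (the function name and the hard-coded major version 5 mirror the workflow; everything else is illustrative):

import re

def next_version(tag: str, base_branch: str = "master") -> tuple[str, str]:
    # Minor releases (x.y.0) bump the minor and target master;
    # patch releases bump the patch and target the vX.Y release branch.
    m = re.fullmatch(r"(\d+)\.(\d+)\.(\d+)", tag)
    if m is None:
        raise ValueError(f"Invalid version syntax: {tag!r} (must be N.N.N)")
    major, minor, fix = map(int, m.groups())
    if major != 5:
        raise ValueError("Releasing another Prowler major version, aborting...")
    if fix == 0:
        return f"{major}.{minor + 1}.0", base_branch
    return f"{major}.{minor}.{fix + 1}", f"v{major}.{minor}"

assert next_version("5.6.0") == ("5.7.0", "master")
assert next_version("5.5.2") == ("5.5.3", "v5.5")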
@@ -21,6 +21,7 @@ on:
    paths-ignore:
      - 'ui/**'
      - 'api/**'
+     - '.github/**'
  pull_request:
    branches:
      - "master"
@@ -30,6 +31,7 @@ on:
    paths-ignore:
      - 'ui/**'
      - 'api/**'
+     - '.github/**'
  schedule:
    - cron: '00 12 * * *'
@@ -107,11 +107,102 @@ jobs:
        run: |
          /tmp/hadolint Dockerfile --ignore=DL3013

-     - name: Test with pytest
+     # Test AWS
+     - name: AWS - Check if any file has changed
+       id: aws-changed-files
+       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+       with:
+         files: |
+           ./prowler/providers/aws/**
+           ./tests/providers/aws/**
+
+     - name: AWS - Test
+       if: steps.aws-changed-files.outputs.any_changed == 'true'
+       run: |
+         poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
+
+     # Test Azure
+     - name: Azure - Check if any file has changed
+       id: azure-changed-files
+       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+       with:
+         files: |
+           ./prowler/providers/azure/**
+           ./tests/providers/azure/**
+
+     - name: Azure - Test
+       if: steps.azure-changed-files.outputs.any_changed == 'true'
+       run: |
+         poetry run pytest -n auto --cov=./prowler/providers/azure --cov-report=xml:azure_coverage.xml tests/providers/azure
+
+     # Test GCP
+     - name: GCP - Check if any file has changed
+       id: gcp-changed-files
+       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+       with:
+         files: |
+           ./prowler/providers/gcp/**
+           ./tests/providers/gcp/**
+
+     - name: GCP - Test
+       if: steps.gcp-changed-files.outputs.any_changed == 'true'
+       run: |
+         poetry run pytest -n auto --cov=./prowler/providers/gcp --cov-report=xml:gcp_coverage.xml tests/providers/gcp
+
+     # Test Kubernetes
+     - name: Kubernetes - Check if any file has changed
+       id: kubernetes-changed-files
+       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+       with:
+         files: |
+           ./prowler/providers/kubernetes/**
+           ./tests/providers/kubernetes/**
+
+     - name: Kubernetes - Test
+       if: steps.kubernetes-changed-files.outputs.any_changed == 'true'
+       run: |
+         poetry run pytest -n auto --cov=./prowler/providers/kubernetes --cov-report=xml:kubernetes_coverage.xml tests/providers/kubernetes
+
+     # Test NHN
+     - name: NHN - Check if any file has changed
+       id: nhn-changed-files
+       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+       with:
+         files: |
+           ./prowler/providers/nhn/**
+           ./tests/providers/nhn/**
+
+     - name: NHN - Test
+       if: steps.nhn-changed-files.outputs.any_changed == 'true'
+       run: |
+         poetry run pytest -n auto --cov=./prowler/providers/nhn --cov-report=xml:nhn_coverage.xml tests/providers/nhn
+
+     # Test M365
+     - name: M365 - Check if any file has changed
+       id: m365-changed-files
+       uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+       with:
+         files: |
+           ./prowler/providers/m365/**
+           ./tests/providers/m365/**
+
+     - name: M365 - Test
+       if: steps.m365-changed-files.outputs.any_changed == 'true'
+       run: |
+         poetry run pytest -n auto --cov=./prowler/providers/m365 --cov-report=xml:m365_coverage.xml tests/providers/m365
+
+     # Common Tests
+     - name: Lib - Test
        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
        run: |
-         poetry run pytest -n auto --cov=./prowler --cov-report=xml tests
+         poetry run pytest -n auto --cov=./prowler/lib --cov-report=xml:lib_coverage.xml tests/lib
+
+     - name: Config - Test
+       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
+       run: |
+         poetry run pytest -n auto --cov=./prowler/config --cov-report=xml:config_coverage.xml tests/config

      # Codecov
      - name: Upload coverage reports to Codecov
        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
        uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5.4.2
@@ -119,3 +210,4 @@ jobs:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler
+         files: ./aws_coverage.xml,./azure_coverage.xml,./gcp_coverage.xml,./kubernetes_coverage.xml,./nhn_coverage.xml,./m365_coverage.xml,./lib_coverage.xml,./config_coverage.xml
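
Each provider suite is gated on a path filter; the rule reduces to a prefix test on the changed paths. A standalone sketch, assuming the same directory layout as the filters above (the function and sample inputs are illustrative):

PROVIDERS = ["aws", "azure", "gcp", "kubernetes", "nhn", "m365"]

def suites_to_run(changed_files: list[str]) -> set[str]:
    # Mirrors the tj-actions/changed-files filters: a provider suite runs
    # only when a file under its source or test tree changed.
    suites = set()
    for path in changed_files:
        for provider in PROVIDERS:
            if path.startswith(
                (f"prowler/providers/{provider}/", f"tests/providers/{provider}/")
            ):
                suites.add(provider)
    return suites

assert suites_to_run(["prowler/providers/m365/m365_provider.py"]) == {"m365"}
assert suites_to_run(["README.md"]) == set()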
@@ -115,7 +115,7 @@ repos:
      - id: safety
        name: safety
        description: "Safety is a tool that checks your installed dependencies for known security vulnerabilities"
-       entry: bash -c 'safety check --ignore 70612,66963'
+       entry: bash -c 'safety check --ignore 70612,66963,74429'
        language: system

      - id: vulture
+32 −10
@@ -1,24 +1,43 @@
-FROM python:3.12.10-alpine3.20
+FROM python:3.12.10-slim-bookworm AS build

 LABEL maintainer="https://github.com/prowler-cloud/prowler"
 LABEL org.opencontainers.image.source="https://github.com/prowler-cloud/prowler"

-# Update system dependencies and install essential tools
-#hadolint ignore=DL3018
-RUN apk --no-cache upgrade && apk --no-cache add curl git gcc python3-dev musl-dev linux-headers
+ARG POWERSHELL_VERSION=7.5.0
+
+# hadolint ignore=DL3008
+RUN apt-get update && apt-get install -y --no-install-recommends wget libicu72 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install PowerShell
+RUN ARCH=$(uname -m) && \
+    if [ "$ARCH" = "x86_64" ]; then \
+        wget --progress=dot:giga https://github.com/PowerShell/PowerShell/releases/download/v${POWERSHELL_VERSION}/powershell-${POWERSHELL_VERSION}-linux-x64.tar.gz -O /tmp/powershell.tar.gz ; \
+    elif [ "$ARCH" = "aarch64" ]; then \
+        wget --progress=dot:giga https://github.com/PowerShell/PowerShell/releases/download/v${POWERSHELL_VERSION}/powershell-${POWERSHELL_VERSION}-linux-arm64.tar.gz -O /tmp/powershell.tar.gz ; \
+    else \
+        echo "Unsupported architecture: $ARCH" && exit 1 ; \
+    fi && \
+    mkdir -p /opt/microsoft/powershell/7 && \
+    tar zxf /tmp/powershell.tar.gz -C /opt/microsoft/powershell/7 && \
+    chmod +x /opt/microsoft/powershell/7/pwsh && \
+    ln -s /opt/microsoft/powershell/7/pwsh /usr/bin/pwsh && \
+    rm /tmp/powershell.tar.gz
+
+# Add prowler user
+RUN addgroup --gid 1000 prowler && \
+    adduser --uid 1000 --gid 1000 --disabled-password --gecos "" prowler

-# Create non-root user
-RUN mkdir -p /home/prowler && \
-    echo 'prowler:x:1000:1000:prowler:/home/prowler:' > /etc/passwd && \
-    echo 'prowler:x:1000:' > /etc/group && \
-    chown -R prowler:prowler /home/prowler
 USER prowler

-# Copy necessary files
 WORKDIR /home/prowler

+# Copy necessary files
 COPY prowler/ /home/prowler/prowler/
 COPY dashboard/ /home/prowler/dashboard/
 COPY pyproject.toml /home/prowler
 COPY README.md /home/prowler/
+COPY prowler/providers/m365/lib/powershell/m365_powershell.py /home/prowler/prowler/providers/m365/lib/powershell/m365_powershell.py

 # Install Python dependencies
 ENV HOME='/home/prowler'
@@ -34,6 +53,9 @@ RUN pip install --no-cache-dir --upgrade pip && \
 RUN poetry install --compile && \
     rm -rf ~/.cache/pip

+# Install PowerShell modules
+RUN poetry run python prowler/providers/m365/lib/powershell/m365_powershell.py
+
 # Remove deprecated dash dependencies
 RUN pip uninstall dash-html-components -y && \
     pip uninstall dash-core-components -y
@@ -75,7 +75,7 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe
 | GCP | 79 | 13 | 7 | 3 |
 | Azure | 140 | 18 | 8 | 3 |
 | Kubernetes | 83 | 7 | 4 | 7 |
-| M365 | 5 | 2 | 1 | 0 |
+| M365 | 44 | 2 | 1 | 0 |
 | NHN (Unofficial) | 6 | 2 | 1 | 0 |

 > You can list the checks, services, compliance frameworks and categories with `prowler <provider> --list-checks`, `prowler <provider> --list-services`, `prowler <provider> --list-compliance` and `prowler <provider> --list-categories`.
@@ -80,7 +80,7 @@ repos:
      - id: safety
        name: safety
        description: "Safety is a tool that checks your installed dependencies for known security vulnerabilities"
-       entry: bash -c 'poetry run safety check --ignore 70612,66963'
+       entry: bash -c 'poetry run safety check --ignore 70612,66963,74429'
        language: system

      - id: vulture
@@ -3,6 +3,16 @@
 All notable changes to the **Prowler API** are documented in this file.


+## [v1.7.0] (UNRELEASED)
+
+### Added
+
+- Added M365 as a new provider [(#7563)](https://github.com/prowler-cloud/prowler/pull/7563).
+- Added a `compliance/` folder and ZIP export functionality for all compliance reports [(#7653)](https://github.com/prowler-cloud/prowler/pull/7653).
+- Added a new API endpoint to fetch and download any specific compliance file by name [(#7653)](https://github.com/prowler-cloud/prowler/pull/7653).
+
+---
+
 ## [v1.6.0] (Prowler v5.5.0)

 ### Added
+29 −14
@@ -1,13 +1,33 @@
-FROM python:3.12.8-alpine3.20 AS build
+FROM python:3.12.10-slim-bookworm AS build

 LABEL maintainer="https://github.com/prowler-cloud/api"

-# hadolint ignore=DL3018
-RUN apk --no-cache add gcc python3-dev musl-dev linux-headers curl-dev
+ARG POWERSHELL_VERSION=7.5.0
+ENV POWERSHELL_VERSION=${POWERSHELL_VERSION}
+
+# hadolint ignore=DL3008
+RUN apt-get update && apt-get install -y --no-install-recommends wget libicu72 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install PowerShell
+RUN ARCH=$(uname -m) && \
+    if [ "$ARCH" = "x86_64" ]; then \
+        wget --progress=dot:giga https://github.com/PowerShell/PowerShell/releases/download/v${POWERSHELL_VERSION}/powershell-${POWERSHELL_VERSION}-linux-x64.tar.gz -O /tmp/powershell.tar.gz ; \
+    elif [ "$ARCH" = "aarch64" ]; then \
+        wget --progress=dot:giga https://github.com/PowerShell/PowerShell/releases/download/v${POWERSHELL_VERSION}/powershell-${POWERSHELL_VERSION}-linux-arm64.tar.gz -O /tmp/powershell.tar.gz ; \
+    else \
+        echo "Unsupported architecture: $ARCH" && exit 1 ; \
+    fi && \
+    mkdir -p /opt/microsoft/powershell/7 && \
+    tar zxf /tmp/powershell.tar.gz -C /opt/microsoft/powershell/7 && \
+    chmod +x /opt/microsoft/powershell/7/pwsh && \
+    ln -s /opt/microsoft/powershell/7/pwsh /usr/bin/pwsh && \
+    rm /tmp/powershell.tar.gz
+
+# Add prowler user
+RUN addgroup --gid 1000 prowler && \
+    adduser --uid 1000 --gid 1000 --disabled-password --gecos "" prowler

-RUN apk --no-cache upgrade && \
-    addgroup -g 1000 prowler && \
-    adduser -D -u 1000 -G prowler prowler
 USER prowler

 WORKDIR /home/prowler
@@ -17,7 +37,7 @@ COPY pyproject.toml ./
 RUN pip install --no-cache-dir --upgrade pip && \
     pip install --no-cache-dir poetry

-COPY src/backend/ ./backend/
+COPY src/backend/ ./backend/

 ENV PATH="/home/prowler/.local/bin:$PATH"

@@ -27,18 +47,13 @@ RUN poetry install --no-root && \

 COPY docker-entrypoint.sh ./docker-entrypoint.sh

+RUN poetry run python "$(poetry env info --path)/src/prowler/prowler/providers/m365/lib/powershell/m365_powershell.py"
+
 WORKDIR /home/prowler/backend

 # Development image
 # hadolint ignore=DL3006
 FROM build AS dev

-USER 0
-# hadolint ignore=DL3018
-RUN apk --no-cache add curl vim

 USER prowler

 ENTRYPOINT ["../docker-entrypoint.sh", "dev"]

 # Production image
Generated file: +969 −914 (diff suppressed because it is too large)
+3 −2
@@ -7,7 +7,7 @@ authors = [{name = "Prowler Engineering", email = "engineering@prowler.com"}]
 dependencies = [
     "celery[pytest] (>=5.4.0,<6.0.0)",
     "dj-rest-auth[with_social,jwt] (==7.0.1)",
-    "django==5.1.7",
+    "django==5.1.8",
     "django-allauth==65.4.1",
     "django-celery-beat (>=2.7.0,<3.0.0)",
     "django-celery-results (>=2.5.1,<3.0.0)",
@@ -35,7 +35,7 @@ name = "prowler-api"
 package-mode = false
 # Needed for the SDK compatibility
 requires-python = ">=3.11,<3.13"
-version = "1.6.0"
+version = "1.7.0"

 [project.scripts]
 celery = "src.backend.config.settings.celery"
@@ -46,6 +46,7 @@ coverage = "7.5.4"
 django-silk = "5.3.2"
 docker = "7.1.0"
 freezegun = "1.5.1"
+marshmallow = ">=3.15.0,<4.0.0"
 mypy = "1.10.1"
 pylint = "3.2.5"
 pytest = "8.2.2"
@@ -0,0 +1,32 @@
# Generated by Django 5.1.7 on 2025-04-16 08:47

from django.db import migrations

import api.db_utils


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0016_finding_compliance_resource_details_and_more"),
    ]

    operations = [
        migrations.AlterField(
            model_name="provider",
            name="provider",
            field=api.db_utils.ProviderEnumField(
                choices=[
                    ("aws", "AWS"),
                    ("azure", "Azure"),
                    ("gcp", "GCP"),
                    ("kubernetes", "Kubernetes"),
                    ("m365", "M365"),
                ],
                default="aws",
            ),
        ),
        migrations.RunSQL(
            "ALTER TYPE provider ADD VALUE IF NOT EXISTS 'm365';",
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]
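
The RunSQL step targets PostgreSQL: ALTER TYPE ... ADD VALUE IF NOT EXISTS extends the enum behind the provider column, and reverse_sql is a no-op because PostgreSQL cannot drop an enum value. A quick post-migration sanity check might look like this (assumes an open psycopg-style connection named conn; the enum type name 'provider' comes from the SQL above):

with conn.cursor() as cur:
    # enum_range(NULL::provider) yields every value of the 'provider' enum type.
    cur.execute("SELECT unnest(enum_range(NULL::provider))::text")
    values = {row[0] for row in cur.fetchall()}
assert "m365" in values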
@@ -191,6 +191,7 @@ class Provider(RowLevelSecurityProtectedModel):
         AZURE = "azure", _("Azure")
         GCP = "gcp", _("GCP")
         KUBERNETES = "kubernetes", _("Kubernetes")
+        M365 = "m365", _("M365")

     @staticmethod
     def validate_aws_uid(value):
@@ -214,6 +215,15 @@ class Provider(RowLevelSecurityProtectedModel):
                 pointer="/data/attributes/uid",
             )

+    @staticmethod
+    def validate_m365_uid(value):
+        if not re.match(r"^[a-zA-Z0-9-]+\.onmicrosoft\.com$", value):
+            raise ModelValidationError(
+                detail="M365 tenant ID must be a valid domain.",
+                code="m365-uid",
+                pointer="/data/attributes/uid",
+            )
+
     @staticmethod
     def validate_gcp_uid(value):
         if not re.match(r"^[a-z][a-z0-9-]{5,29}$", value):
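
A quick standalone check of the new M365 UID rule, using the regex from the diff (the sample tenant names are illustrative):

import re

M365_UID_PATTERN = r"^[a-zA-Z0-9-]+\.onmicrosoft\.com$"

# Accepted: a single label of letters, digits, and hyphens under onmicrosoft.com.
assert re.match(M365_UID_PATTERN, "contoso.onmicrosoft.com")
assert re.match(M365_UID_PATTERN, "my-tenant-01.onmicrosoft.com")
# Rejected: other domains, extra subdomain labels, underscores.
assert not re.match(M365_UID_PATTERN, "contoso.example.com")
assert not re.match(M365_UID_PATTERN, "sub.contoso.onmicrosoft.com")
assert not re.match(M365_UID_PATTERN, "a_b.onmicrosoft.com")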
@@ -1,7 +1,7 @@
 openapi: 3.0.3
 info:
   title: Prowler API
-  version: 1.6.0
+  version: 1.7.0
   description: |-
     Prowler API specification.

@@ -83,11 +83,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -99,6 +101,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -106,6 +109,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -450,11 +454,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -466,6 +472,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -473,6 +480,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -962,11 +970,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -978,6 +988,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -985,6 +996,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -1395,11 +1407,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -1411,6 +1425,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -1418,6 +1433,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -2047,11 +2063,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -2063,6 +2081,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -2070,6 +2089,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -2204,11 +2224,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -2220,6 +2242,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -2227,6 +2250,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -2377,11 +2401,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -2393,6 +2419,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -2400,6 +2427,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -2863,11 +2891,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider__in]
        schema:
@@ -3441,11 +3471,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -3457,6 +3489,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -3464,6 +3497,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -4167,11 +4201,13 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
      - in: query
        name: filter[provider_type__in]
        schema:
@@ -4183,6 +4219,7 @@ paths:
          - azure
          - gcp
          - kubernetes
+         - m365
        description: |-
          Multiple values may be separated by commas.

@@ -4190,6 +4227,7 @@ paths:
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
+         * `m365` - M365
        explode: false
        style: form
      - in: query
@@ -4465,6 +4503,47 @@ paths:
        schema:
          $ref: '#/components/schemas/ScanUpdateResponse'
        description: ''
+  /api/v1/scans/{id}/compliance/{name}:
+    get:
+      operationId: scan_compliance_download
+      description: Download a specific compliance report (e.g., 'cis_1.4_aws') as
+        a CSV file.
+      summary: Retrieve compliance report as CSV
+      parameters:
+      - in: query
+        name: fields[scan-reports]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - id
+            - name
+        description: endpoint return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this scan.
+        required: true
+      - in: path
+        name: name
+        schema:
+          type: string
+        description: The compliance report name, like 'cis_1.4_aws'
+        required: true
+      tags:
+      - Scan
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          description: CSV file containing the compliance report
+        '404':
+          description: Compliance report not found
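
A hedged sketch of a client calling the new endpoint (host, scan UUID, and token are placeholders; the 202 branch follows the behavior exercised in the tests later in this change set, where an executing scan returns a Content-Location header pointing at the task):

import requests

BASE_URL = "https://prowler-api.example.com"  # placeholder host
SCAN_ID = "00000000-0000-0000-0000-000000000000"  # placeholder scan UUID
TOKEN = "..."  # placeholder JWT

resp = requests.get(
    f"{BASE_URL}/api/v1/scans/{SCAN_ID}/compliance/cis_1.4_aws",
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30,
)
if resp.status_code == 200:
    # The report is returned as a CSV attachment.
    with open("cis_1.4_aws.csv", "wb") as f:
        f.write(resp.content)
elif resp.status_code == 202:
    # Scan still executing; poll the task referenced by Content-Location.
    print("Report not ready yet:", resp.headers.get("Content-Location"))
else:
    resp.raise_for_status()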
  /api/v1/scans/{id}/report:
    get:
      operationId: scans_report_retrieve
@@ -8347,6 +8426,33 @@ components:
            - client_id
            - client_secret
            - tenant_id
+         - type: object
+           title: M365 Static Credentials
+           properties:
+             client_id:
+               type: string
+               description: The Azure application (client) ID for authentication
+                 in Azure AD.
+             client_secret:
+               type: string
+               description: The client secret associated with the application
+                 (client) ID, providing secure access.
+             tenant_id:
+               type: string
+               description: The Azure tenant ID, representing the directory
+                 where the application is registered.
+             user:
+               type: email
+               description: User microsoft email address.
+             encrypted_password:
+               type: string
+               description: User encrypted password.
+           required:
+           - client_id
+           - client_secret
+           - tenant_id
+           - user
+           - encrypted_password
          - type: object
            title: GCP Static Credentials
            properties:
@@ -8814,12 +8920,14 @@ components:
          - azure
          - gcp
          - kubernetes
+         - m365
          type: string
          description: |-
            * `aws` - AWS
            * `azure` - Azure
            * `gcp` - GCP
            * `kubernetes` - Kubernetes
+           * `m365` - M365
        uid:
          type: string
          title: Unique identifier for the provider, set by the provider
@@ -8926,12 +9034,14 @@ components:
          - azure
          - gcp
          - kubernetes
+         - m365
          type: string
          description: |-
            * `aws` - AWS
            * `azure` - Azure
            * `gcp` - GCP
            * `kubernetes` - Kubernetes
+           * `m365` - M365
        uid:
          type: string
          title: Unique identifier for the provider, set by the provider
@@ -8969,12 +9079,14 @@ components:
          - azure
          - gcp
          - kubernetes
+         - m365
          type: string
          description: |-
            * `aws` - AWS
            * `azure` - Azure
            * `gcp` - GCP
            * `kubernetes` - Kubernetes
+           * `m365` - M365
        uid:
          type: string
          minLength: 3
@@ -9559,6 +9671,33 @@ components:
            - client_id
            - client_secret
            - tenant_id
+         - type: object
+           title: M365 Static Credentials
+           properties:
+             client_id:
+               type: string
+               description: The Azure application (client) ID for authentication
+                 in Azure AD.
+             client_secret:
+               type: string
+               description: The client secret associated with the application
+                 (client) ID, providing secure access.
+             tenant_id:
+               type: string
+               description: The Azure tenant ID, representing the directory where
+                 the application is registered.
+             user:
+               type: email
+               description: User microsoft email address.
+             encrypted_password:
+               type: string
+               description: User encrypted password.
+           required:
+           - client_id
+           - client_secret
+           - tenant_id
+           - user
+           - encrypted_password
          - type: object
            title: GCP Static Credentials
            properties:
@@ -9741,6 +9880,33 @@ components:
            - client_id
            - client_secret
            - tenant_id
+         - type: object
+           title: M365 Static Credentials
+           properties:
+             client_id:
+               type: string
+               description: The Azure application (client) ID for authentication
+                 in Azure AD.
+             client_secret:
+               type: string
+               description: The client secret associated with the application
+                 (client) ID, providing secure access.
+             tenant_id:
+               type: string
+               description: The Azure tenant ID, representing the directory
+                 where the application is registered.
+             user:
+               type: email
+               description: User microsoft email address.
+             encrypted_password:
+               type: string
+               description: User encrypted password.
+           required:
+           - client_id
+           - client_secret
+           - tenant_id
+           - user
+           - encrypted_password
          - type: object
            title: GCP Static Credentials
            properties:
@@ -9939,6 +10105,33 @@ components:
            - client_id
            - client_secret
            - tenant_id
+         - type: object
+           title: M365 Static Credentials
+           properties:
+             client_id:
+               type: string
+               description: The Azure application (client) ID for authentication
+                 in Azure AD.
+             client_secret:
+               type: string
+               description: The client secret associated with the application
+                 (client) ID, providing secure access.
+             tenant_id:
+               type: string
+               description: The Azure tenant ID, representing the directory where
+                 the application is registered.
+             user:
+               type: email
+               description: User microsoft email address.
+             encrypted_password:
+               type: string
+               description: User encrypted password.
+           required:
+           - client_id
+           - client_secret
+           - tenant_id
+           - user
+           - encrypted_password
          - type: object
            title: GCP Static Credentials
            properties:
@@ -19,6 +19,7 @@ from prowler.providers.aws.aws_provider import AwsProvider
 from prowler.providers.azure.azure_provider import AzureProvider
 from prowler.providers.gcp.gcp_provider import GcpProvider
 from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
+from prowler.providers.m365.m365_provider import M365Provider


 class TestMergeDicts:
@@ -104,6 +105,7 @@ class TestReturnProwlerProvider:
             (Provider.ProviderChoices.GCP.value, GcpProvider),
             (Provider.ProviderChoices.AZURE.value, AzureProvider),
             (Provider.ProviderChoices.KUBERNETES.value, KubernetesProvider),
+            (Provider.ProviderChoices.M365.value, M365Provider),
         ],
     )
     def test_return_prowler_provider(self, provider_type, expected_provider):
@@ -176,6 +178,10 @@ class TestGetProwlerProviderKwargs:
                 Provider.ProviderChoices.KUBERNETES.value,
                 {"context": "provider_uid"},
             ),
+            (
+                Provider.ProviderChoices.M365.value,
+                {},
+            ),
         ],
     )
     def test_get_prowler_provider_kwargs(self, provider_type, expected_extra_kwargs):
@@ -31,6 +31,7 @@ from api.models import (
     UserRoleRelationship,
 )
 from api.rls import Tenant
+from prowler.config.config import get_available_compliance_frameworks

 TODAY = str(datetime.today().date())

@@ -2277,7 +2278,8 @@ class TestScanViewSet:
         scan.save()

         monkeypatch.setattr(
-            "api.v1.views.env", type("env", (), {"str": lambda self, key: bucket})()
+            "api.v1.views.env",
+            type("env", (), {"str": lambda self, *args, **kwargs: "test-bucket"})(),
         )

         class FakeS3Client:
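
The stub's signature widens from lambda self, key to lambda self, *args, **kwargs, so the fake env.str keeps working even if the view passes extra arguments such as a default. A minimal standalone illustration (names are illustrative):

# Fake settings object in the style of django-environ's Env.
fake_env = type("env", (), {"str": lambda self, *args, **kwargs: "test-bucket"})()

assert fake_env.str("AWS_S3_BUCKET") == "test-bucket"
# A keyword default no longer breaks the stub:
assert fake_env.str("AWS_S3_BUCKET", default="fallback") == "test-bucket"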
@@ -2346,6 +2348,165 @@ class TestScanViewSet:
         assert content_disposition.startswith('attachment; filename="')
         assert f'filename="{file_path.name}"' in content_disposition

+    def test_compliance_invalid_framework(self, authenticated_client, scans_fixture):
+        scan = scans_fixture[0]
+        scan.state = StateChoices.COMPLETED
+        scan.output_location = "dummy"
+        scan.save()
+
+        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": "invalid"})
+        resp = authenticated_client.get(url)
+        assert resp.status_code == status.HTTP_404_NOT_FOUND
+        assert resp.json()["errors"]["detail"] == "Compliance 'invalid' not found."
+
+    def test_compliance_executing(
+        self, authenticated_client, scans_fixture, monkeypatch
+    ):
+        scan = scans_fixture[0]
+        scan.state = StateChoices.EXECUTING
+        scan.save()
+        task = Task.objects.create(tenant_id=scan.tenant_id)
+        scan.task = task
+        scan.save()
+        dummy = {"id": str(task.id), "state": StateChoices.EXECUTING}
+
+        monkeypatch.setattr(
+            "api.v1.views.TaskSerializer",
+            lambda *args, **kwargs: type("S", (), {"data": dummy}),
+        )
+
+        framework = get_available_compliance_frameworks(scan.provider.provider)[0]
+        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": framework})
+        resp = authenticated_client.get(url)
+        assert resp.status_code == status.HTTP_202_ACCEPTED
+        assert "Content-Location" in resp
+        assert dummy["id"] in resp["Content-Location"]
+
+    def test_compliance_no_output(self, authenticated_client, scans_fixture):
+        scan = scans_fixture[0]
+        scan.state = StateChoices.COMPLETED
+        scan.output_location = ""
+        scan.save()
+
+        framework = get_available_compliance_frameworks(scan.provider.provider)[0]
+        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": framework})
+        resp = authenticated_client.get(url)
+        assert resp.status_code == status.HTTP_404_NOT_FOUND
+        assert resp.json()["errors"]["detail"] == "The scan has no reports."
+
+    def test_compliance_s3_no_credentials(
+        self, authenticated_client, scans_fixture, monkeypatch
+    ):
+        scan = scans_fixture[0]
+        bucket = "bucket"
+        key = "file.zip"
+        scan.output_location = f"s3://{bucket}/{key}"
+        scan.state = StateChoices.COMPLETED
+        scan.save()
+
+        monkeypatch.setattr(
+            "api.v1.views.get_s3_client",
+            lambda: (_ for _ in ()).throw(NoCredentialsError()),
+        )
+
+        framework = get_available_compliance_frameworks(scan.provider.provider)[0]
+        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": framework})
+        resp = authenticated_client.get(url)
+        assert resp.status_code == status.HTTP_403_FORBIDDEN
+        assert resp.json()["errors"]["detail"] == "There is a problem with credentials."
+
+    def test_compliance_s3_success(
+        self, authenticated_client, scans_fixture, monkeypatch
+    ):
+        scan = scans_fixture[0]
+        bucket = "bucket"
+        prefix = "path/scan.zip"
+        scan.output_location = f"s3://{bucket}/{prefix}"
+        scan.state = StateChoices.COMPLETED
+        scan.save()
+
+        monkeypatch.setattr(
+            "api.v1.views.env",
+            type("env", (), {"str": lambda self, *args, **kwargs: "test-bucket"})(),
+        )
+
+        match_key = "path/compliance/mitre_attack_aws.csv"
+
+        class FakeS3Client:
+            def list_objects_v2(self, Bucket, Prefix):
+                return {"Contents": [{"Key": match_key}]}
+
+            def get_object(self, Bucket, Key):
+                return {"Body": io.BytesIO(b"ignored")}
+
+        monkeypatch.setattr("api.v1.views.get_s3_client", lambda: FakeS3Client())
+
+        framework = match_key.split("/")[-1].split(".")[0]
+        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": framework})
+        resp = authenticated_client.get(url)
+        assert resp.status_code == status.HTTP_200_OK
+        cd = resp["Content-Disposition"]
+        assert cd.startswith('attachment; filename="')
+        assert cd.endswith('filename="mitre_attack_aws.csv"')
+
+    def test_compliance_s3_not_found(
+        self, authenticated_client, scans_fixture, monkeypatch
+    ):
+        scan = scans_fixture[0]
+        bucket = "bucket"
+        scan.output_location = f"s3://{bucket}/x/scan.zip"
+        scan.state = StateChoices.COMPLETED
+        scan.save()
+
+        monkeypatch.setattr(
+            "api.v1.views.env",
+            type("env", (), {"str": lambda self, *args, **kwargs: "test-bucket"})(),
+        )
+
+        class FakeS3Client:
+            def list_objects_v2(self, Bucket, Prefix):
+                return {"Contents": []}
+
+            def get_object(self, Bucket, Key):
+                return {"Body": io.BytesIO(b"ignored")}
+
+        monkeypatch.setattr("api.v1.views.get_s3_client", lambda: FakeS3Client())
+
+        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": "cis_1.4_aws"})
+        resp = authenticated_client.get(url)
+        assert resp.status_code == status.HTTP_404_NOT_FOUND
+        assert (
+            resp.json()["errors"]["detail"]
+            == "No compliance file found for name 'cis_1.4_aws'."
+        )
+
+    def test_compliance_local_file(
+        self, authenticated_client, scans_fixture, tmp_path, monkeypatch
+    ):
+        scan = scans_fixture[0]
+        scan.state = StateChoices.COMPLETED
+        base = tmp_path / "reports"
+        comp_dir = base / "compliance"
+        comp_dir.mkdir(parents=True)
+        fname = comp_dir / "scan_cis.csv"
+        fname.write_bytes(b"ignored")
+
+        scan.output_location = str(base / "scan.zip")
+        scan.save()
+
+        monkeypatch.setattr(
+            glob,
+            "glob",
+            lambda p: [str(fname)] if p.endswith("*_cis_1.4_aws.csv") else [],
+        )
+
+        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": "cis_1.4_aws"})
+        resp = authenticated_client.get(url)
+        assert resp.status_code == status.HTTP_200_OK
+        cd = resp["Content-Disposition"]
+        assert cd.startswith('attachment; filename="')
+        assert cd.endswith(f'filename="{fname.name}"')
+

 @pytest.mark.django_db
 class TestTaskViewSet:
@@ -11,6 +11,7 @@ from prowler.providers.azure.azure_provider import AzureProvider
 from prowler.providers.common.models import Connection
 from prowler.providers.gcp.gcp_provider import GcpProvider
 from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
+from prowler.providers.m365.m365_provider import M365Provider


 class CustomOAuth2Client(OAuth2Client):
@@ -51,14 +52,14 @@ def merge_dicts(default_dict: dict, replacement_dict: dict) -> dict:

 def return_prowler_provider(
     provider: Provider,
-) -> [AwsProvider | AzureProvider | GcpProvider | KubernetesProvider]:
+) -> [AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider]:
     """Return the Prowler provider class based on the given provider type.

     Args:
         provider (Provider): The provider object containing the provider type and associated secrets.

     Returns:
-        AwsProvider | AzureProvider | GcpProvider | KubernetesProvider: The corresponding provider class.
+        AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider: The corresponding provider class.

     Raises:
         ValueError: If the provider type specified in `provider.provider` is not supported.
@@ -72,6 +73,8 @@ def return_prowler_provider(
             prowler_provider = AzureProvider
         case Provider.ProviderChoices.KUBERNETES.value:
             prowler_provider = KubernetesProvider
+        case Provider.ProviderChoices.M365.value:
+            prowler_provider = M365Provider
         case _:
             raise ValueError(f"Provider type {provider.provider} not supported")
     return prowler_provider
@@ -104,15 +107,15 @@ def get_prowler_provider_kwargs(provider: Provider) -> dict:

 def initialize_prowler_provider(
     provider: Provider,
-) -> AwsProvider | AzureProvider | GcpProvider | KubernetesProvider:
+) -> AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider:
     """Initialize a Prowler provider instance based on the given provider type.

     Args:
         provider (Provider): The provider object containing the provider type and associated secrets.

     Returns:
-        AwsProvider | AzureProvider | GcpProvider | KubernetesProvider: An instance of the corresponding provider class
-            (`AwsProvider`, `AzureProvider`, `GcpProvider`, or `KubernetesProvider`) initialized with the
+        AwsProvider | AzureProvider | GcpProvider | KubernetesProvider | M365Provider: An instance of the corresponding provider class
+            (`AwsProvider`, `AzureProvider`, `GcpProvider`, `KubernetesProvider` or `M365Provider`) initialized with the
             provider's secrets.
     """
     prowler_provider = return_prowler_provider(provider)
@@ -130,10 +133,12 @@ def prowler_provider_connection_test(provider: Provider) -> Connection:
         Connection: A connection object representing the result of the connection test for the specified provider.
     """
     prowler_provider = return_prowler_provider(provider)
+
     try:
         prowler_provider_kwargs = provider.secret.secret
     except Provider.secret.RelatedObjectDoesNotExist as secret_error:
         return Connection(is_connected=False, error=secret_error)
+
     return prowler_provider.test_connection(
         **prowler_provider_kwargs, provider_id=provider.uid, raise_on_exception=False
     )
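
Two notes on this hunk. First, the return annotations wrap the union in square brackets ([AwsProvider | ...]), i.e. a list containing a union; the change keeps that pre-existing quirk rather than fixing it. Second, the match dispatch reduces to a small mapping; a standalone sketch of the same rule (enum values from the diff, class names replaced by strings to stay self-contained):

PROVIDER_CLASSES = {
    "aws": "AwsProvider",
    "azure": "AzureProvider",
    "gcp": "GcpProvider",
    "kubernetes": "KubernetesProvider",
    "m365": "M365Provider",  # new in this change set
}

def resolve_provider_class(provider_type: str) -> str:
    try:
        return PROVIDER_CLASSES[provider_type]
    except KeyError:
        raise ValueError(f"Provider type {provider_type} not supported") from None

assert resolve_provider_class("m365") == "M365Provider"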
@@ -0,0 +1,172 @@
from drf_spectacular.utils import extend_schema_field
from rest_framework_json_api import serializers


@extend_schema_field(
    {
        "oneOf": [
            {
                "type": "object",
                "title": "AWS Static Credentials",
                "properties": {
                    "aws_access_key_id": {
                        "type": "string",
                        "description": "The AWS access key ID. Required for environments where no IAM role is being "
                        "assumed and direct AWS access is needed.",
                    },
                    "aws_secret_access_key": {
                        "type": "string",
                        "description": "The AWS secret access key. Must accompany 'aws_access_key_id' to authorize "
                        "access to AWS resources.",
                    },
                    "aws_session_token": {
                        "type": "string",
                        "description": "The session token associated with temporary credentials. Only needed for "
                        "session-based or temporary AWS access.",
                    },
                },
                "required": ["aws_access_key_id", "aws_secret_access_key"],
            },
            {
                "type": "object",
                "title": "AWS Assume Role",
                "properties": {
                    "role_arn": {
                        "type": "string",
                        "description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
                        "assumption.",
                    },
                    "external_id": {
                        "type": "string",
                        "description": "An identifier to enhance security for role assumption.",
                    },
                    "aws_access_key_id": {
                        "type": "string",
                        "description": "The AWS access key ID. Only required if the environment lacks pre-configured "
                        "AWS credentials.",
                    },
                    "aws_secret_access_key": {
                        "type": "string",
                        "description": "The AWS secret access key. Required if 'aws_access_key_id' is provided or if "
                        "no AWS credentials are pre-configured.",
                    },
                    "aws_session_token": {
                        "type": "string",
                        "description": "The session token for temporary credentials, if applicable.",
                    },
                    "session_duration": {
                        "type": "integer",
                        "minimum": 900,
                        "maximum": 43200,
                        "default": 3600,
                        "description": "The duration (in seconds) for the role session.",
                    },
                    "role_session_name": {
                        "type": "string",
                        "description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
                        "The regex used to validate this parameter is a string of characters consisting of "
                        "upper- and lower-case alphanumeric characters with no spaces. You can also include "
                        "underscores or any of the following characters: =,.@-\n\n"
                        "Examples:\n"
                        "- MySession123\n"
                        "- User_Session-1\n"
                        "- Test.Session@2",
                        "pattern": "^[a-zA-Z0-9=,.@_-]+$",
                    },
                },
                "required": ["role_arn", "external_id"],
            },
            {
                "type": "object",
                "title": "Azure Static Credentials",
                "properties": {
                    "client_id": {
                        "type": "string",
                        "description": "The Azure application (client) ID for authentication in Azure AD.",
                    },
                    "client_secret": {
                        "type": "string",
                        "description": "The client secret associated with the application (client) ID, providing "
                        "secure access.",
                    },
                    "tenant_id": {
                        "type": "string",
                        "description": "The Azure tenant ID, representing the directory where the application is "
                        "registered.",
                    },
                },
                "required": ["client_id", "client_secret", "tenant_id"],
            },
            {
                "type": "object",
                "title": "M365 Static Credentials",
                "properties": {
                    "client_id": {
                        "type": "string",
                        "description": "The Azure application (client) ID for authentication in Azure AD.",
                    },
                    "client_secret": {
                        "type": "string",
                        "description": "The client secret associated with the application (client) ID, providing "
                        "secure access.",
                    },
                    "tenant_id": {
                        "type": "string",
                        "description": "The Azure tenant ID, representing the directory where the application is "
                        "registered.",
                    },
                    "user": {
                        "type": "email",
                        "description": "User microsoft email address.",
                    },
                    "encrypted_password": {
                        "type": "string",
                        "description": "User encrypted password.",
                    },
                },
                "required": [
                    "client_id",
                    "client_secret",
                    "tenant_id",
                    "user",
                    "encrypted_password",
                ],
            },
            {
                "type": "object",
                "title": "GCP Static Credentials",
                "properties": {
                    "client_id": {
                        "type": "string",
                        "description": "The client ID from Google Cloud, used to identify the application for GCP "
                        "access.",
                    },
                    "client_secret": {
                        "type": "string",
                        "description": "The client secret associated with the GCP client ID, required for secure "
                        "access.",
                    },
                    "refresh_token": {
                        "type": "string",
                        "description": "A refresh token that allows the application to obtain new access tokens for "
                        "extended use.",
                    },
                },
                "required": ["client_id", "client_secret", "refresh_token"],
            },
            {
                "type": "object",
                "title": "Kubernetes Static Credentials",
                "properties": {
                    "kubeconfig_content": {
                        "type": "string",
                        "description": "The content of the Kubernetes kubeconfig file, encoded as a string.",
                    }
                },
                "required": ["kubeconfig_content"],
            },
        ]
    }
)
class ProviderSecretField(serializers.JSONField):
    pass
@@ -42,6 +42,7 @@ from api.v1.serializer_utils.integrations import (
|
||||
IntegrationCredentialField,
|
||||
S3ConfigSerializer,
|
||||
)
|
||||
from api.v1.serializer_utils.providers import ProviderSecretField
|
||||
|
||||
# Tokens
|
||||
|
||||
@@ -959,6 +960,15 @@ class ScanReportSerializer(serializers.Serializer):
|
||||
fields = ["id"]
|
||||
|
||||
|
||||
class ScanComplianceReportSerializer(serializers.Serializer):
|
||||
id = serializers.CharField(source="scan")
|
||||
name = serializers.CharField()
|
||||
|
||||
class Meta:
|
||||
resource_name = "scan-reports"
|
||||
fields = ["id", "name"]
|
||||
|
||||
|
||||
class ResourceTagSerializer(RLSSerializer):
|
||||
"""
|
||||
Serializer for the ResourceTag model
|
||||
@@ -1141,6 +1151,8 @@ class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
|
||||
serializer = GCPProviderSecret(data=secret)
|
||||
elif provider_type == Provider.ProviderChoices.KUBERNETES.value:
|
||||
        serializer = KubernetesProviderSecret(data=secret)
    elif provider_type == Provider.ProviderChoices.M365.value:
        serializer = M365ProviderSecret(data=secret)
    else:
        raise serializers.ValidationError(
            {"provider": f"Provider type not supported {provider_type}"}
@@ -1180,6 +1192,17 @@ class AzureProviderSecret(serializers.Serializer):
        resource_name = "provider-secrets"


class M365ProviderSecret(serializers.Serializer):
    client_id = serializers.CharField()
    client_secret = serializers.CharField()
    tenant_id = serializers.CharField()
    user = serializers.EmailField()
    encrypted_password = serializers.CharField()

    class Meta:
        resource_name = "provider-secrets"


class GCPProviderSecret(serializers.Serializer):
    client_id = serializers.CharField()
    client_secret = serializers.CharField()
@@ -1211,141 +1234,6 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
        resource_name = "provider-secrets"


@extend_schema_field(
    {
        "oneOf": [
            {
                "type": "object",
                "title": "AWS Static Credentials",
                "properties": {
                    "aws_access_key_id": {
                        "type": "string",
                        "description": "The AWS access key ID. Required for environments where no IAM role is being "
                        "assumed and direct AWS access is needed.",
                    },
                    "aws_secret_access_key": {
                        "type": "string",
                        "description": "The AWS secret access key. Must accompany 'aws_access_key_id' to authorize "
                        "access to AWS resources.",
                    },
                    "aws_session_token": {
                        "type": "string",
                        "description": "The session token associated with temporary credentials. Only needed for "
                        "session-based or temporary AWS access.",
                    },
                },
                "required": ["aws_access_key_id", "aws_secret_access_key"],
            },
            {
                "type": "object",
                "title": "AWS Assume Role",
                "properties": {
                    "role_arn": {
                        "type": "string",
                        "description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
                        "assumption.",
                    },
                    "external_id": {
                        "type": "string",
                        "description": "An identifier to enhance security for role assumption.",
                    },
                    "aws_access_key_id": {
                        "type": "string",
                        "description": "The AWS access key ID. Only required if the environment lacks pre-configured "
                        "AWS credentials.",
                    },
                    "aws_secret_access_key": {
                        "type": "string",
                        "description": "The AWS secret access key. Required if 'aws_access_key_id' is provided or if "
                        "no AWS credentials are pre-configured.",
                    },
                    "aws_session_token": {
                        "type": "string",
                        "description": "The session token for temporary credentials, if applicable.",
                    },
                    "session_duration": {
                        "type": "integer",
                        "minimum": 900,
                        "maximum": 43200,
                        "default": 3600,
                        "description": "The duration (in seconds) for the role session.",
                    },
                    "role_session_name": {
                        "type": "string",
                        "description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
                        "The regex used to validate this parameter is a string of characters consisting of "
                        "upper- and lower-case alphanumeric characters with no spaces. You can also include "
                        "underscores or any of the following characters: =,.@-\n\n"
                        "Examples:\n"
                        "- MySession123\n"
                        "- User_Session-1\n"
                        "- Test.Session@2",
                        "pattern": "^[a-zA-Z0-9=,.@_-]+$",
                    },
                },
                "required": ["role_arn", "external_id"],
            },
            {
                "type": "object",
                "title": "Azure Static Credentials",
                "properties": {
                    "client_id": {
                        "type": "string",
                        "description": "The Azure application (client) ID for authentication in Azure AD.",
                    },
                    "client_secret": {
                        "type": "string",
                        "description": "The client secret associated with the application (client) ID, providing "
                        "secure access.",
                    },
                    "tenant_id": {
                        "type": "string",
                        "description": "The Azure tenant ID, representing the directory where the application is "
                        "registered.",
                    },
                },
                "required": ["client_id", "client_secret", "tenant_id"],
            },
            {
                "type": "object",
                "title": "GCP Static Credentials",
                "properties": {
                    "client_id": {
                        "type": "string",
                        "description": "The client ID from Google Cloud, used to identify the application for GCP "
                        "access.",
                    },
                    "client_secret": {
                        "type": "string",
                        "description": "The client secret associated with the GCP client ID, required for secure "
                        "access.",
                    },
                    "refresh_token": {
                        "type": "string",
                        "description": "A refresh token that allows the application to obtain new access tokens for "
                        "extended use.",
                    },
                },
                "required": ["client_id", "client_secret", "refresh_token"],
            },
            {
                "type": "object",
                "title": "Kubernetes Static Credentials",
                "properties": {
                    "kubeconfig_content": {
                        "type": "string",
                        "description": "The content of the Kubernetes kubeconfig file, encoded as a string.",
                    }
                },
                "required": ["kubeconfig_content"],
            },
        ]
    }
)
class ProviderSecretField(serializers.JSONField):
    pass


class ProviderSecretSerializer(RLSSerializer):
    """
    Serializer for the ProviderSecret model.
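
For reference, a request body that would validate against the new M365ProviderSecret fields might look like the sketch below. The JSON:API envelope, the resource type, and the `secret_type` discriminator are assumptions inferred from the `provider-secrets` resource name; only the attribute names under `secret` come from the serializer in this diff.

# Hypothetical create payload for an M365 provider secret; field names
# come from M365ProviderSecret above, everything else is assumed.
m365_secret_payload = {
    "data": {
        "type": "provider-secrets",
        "attributes": {
            "secret_type": "static",  # assumed discriminator, not shown in this diff
            "secret": {
                "client_id": "00000000-0000-0000-0000-000000000000",
                "client_secret": "<client-secret>",
                "tenant_id": "11111111-1111-1111-1111-111111111111",
                "user": "admin@contoso.onmicrosoft.com",
                "encrypted_password": "<encrypted-password>",
            },
        },
    }
}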
+182 -70
@@ -1,7 +1,6 @@
import glob
import os

import sentry_sdk
from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from botocore.exceptions import ClientError, NoCredentialsError, ParamValidationError
@@ -134,6 +133,7 @@ from api.v1.serializers import (
    RoleProviderGroupRelationshipSerializer,
    RoleSerializer,
    RoleUpdateSerializer,
    ScanComplianceReportSerializer,
    ScanCreateSerializer,
    ScanReportSerializer,
    ScanSerializer,
@@ -150,6 +150,7 @@ from api.v1.serializers import (
    UserSerializer,
    UserUpdateSerializer,
)
from prowler.config.config import get_available_compliance_frameworks

CACHE_DECORATOR = cache_control(
    max_age=django_settings.CACHE_MAX_AGE,
@@ -247,7 +248,7 @@ class SchemaView(SpectacularAPIView):

    def get(self, request, *args, **kwargs):
        spectacular_settings.TITLE = "Prowler API"
        spectacular_settings.VERSION = "1.6.0"
        spectacular_settings.VERSION = "1.7.0"
        spectacular_settings.DESCRIPTION = (
            "Prowler API specification.\n\nThis file is auto-generated."
        )
@@ -1150,6 +1151,28 @@ class ProviderViewSet(BaseRLSViewSet):
            404: OpenApiResponse(description="The scan has no reports"),
        },
    ),
    compliance=extend_schema(
        tags=["Scan"],
        summary="Retrieve compliance report as CSV",
        description="Download a specific compliance report (e.g., 'cis_1.4_aws') as a CSV file.",
        operation_id="scan_compliance_download",
        parameters=[
            OpenApiParameter(
                name="name",
                type=str,
                location=OpenApiParameter.PATH,
                required=True,
                description="The compliance report name, like 'cis_1.4_aws'",
            ),
        ],
        responses={
            200: OpenApiResponse(
                description="CSV file containing the compliance report"
            ),
            404: OpenApiResponse(description="Compliance report not found"),
        },
        request=None,
    ),
)
@method_decorator(CACHE_DECORATOR, name="list")
@method_decorator(CACHE_DECORATOR, name="retrieve")
@@ -1202,6 +1225,10 @@ class ScanViewSet(BaseRLSViewSet):
            if hasattr(self, "response_serializer_class"):
                return self.response_serializer_class
            return ScanReportSerializer
        elif self.action == "compliance":
            if hasattr(self, "response_serializer_class"):
                return self.response_serializer_class
            return ScanComplianceReportSerializer
        return super().get_serializer_class()

    def partial_update(self, request, *args, **kwargs):
@@ -1219,70 +1246,86 @@ class ScanViewSet(BaseRLSViewSet):
        )
        return Response(data=read_serializer.data, status=status.HTTP_200_OK)

    @action(detail=True, methods=["get"], url_name="report")
    def report(self, request, pk=None):
        scan_instance = self.get_object()
    def _get_task_status(self, scan_instance):
        """
        If the scan or its report-generation task is still executing,
        return an `HTTP 202 Accepted` response with the task payload and Content-Location.
        """
        task = None

        if scan_instance.state == StateChoices.EXECUTING:
            # If the scan is still running, return the task
            prowler_task = Task.objects.get(id=scan_instance.task.id)
            self.response_serializer_class = TaskSerializer
            output_serializer = self.get_serializer(prowler_task)
            return Response(
                data=output_serializer.data,
                status=status.HTTP_202_ACCEPTED,
                headers={
                    "Content-Location": reverse(
                        "task-detail", kwargs={"pk": output_serializer.data["id"]}
                    )
                },
            )

        try:
            output_celery_task = Task.objects.get(
                task_runner_task__task_name="scan-report",
                task_runner_task__task_args__contains=pk,
            )
            self.response_serializer_class = TaskSerializer
            output_serializer = self.get_serializer(output_celery_task)
            if output_serializer.data["state"] == StateChoices.EXECUTING:
                # If the task is still running, return the task
                return Response(
                    data=output_serializer.data,
                    status=status.HTTP_202_ACCEPTED,
                    headers={
                        "Content-Location": reverse(
                            "task-detail", kwargs={"pk": output_serializer.data["id"]}
                        )
                    },
                )
        except Task.DoesNotExist:
            # If the task does not exist, it has been removed from the database
            pass

        output_location = scan_instance.output_location
        if not output_location:
            return Response(
                {"detail": "The scan has no reports."},
                status=status.HTTP_404_NOT_FOUND,
            )

        if scan_instance.output_location.startswith("s3://"):
        if scan_instance.state == StateChoices.EXECUTING and scan_instance.task:
            task = scan_instance.task
        else:
            try:
                s3_client = get_s3_client()
                task = Task.objects.get(
                    task_runner_task__task_name="scan-report",
                    task_runner_task__task_args__contains=str(scan_instance.id),
                )
            except Task.DoesNotExist:
                return None

        self.response_serializer_class = TaskSerializer
        serializer = self.get_serializer(task)

        if serializer.data.get("state") != StateChoices.EXECUTING:
            return None

        return Response(
            data=serializer.data,
            status=status.HTTP_202_ACCEPTED,
            headers={
                "Content-Location": reverse(
                    "task-detail", kwargs={"pk": serializer.data["id"]}
                )
            },
        )

    def _load_file(self, path_pattern, s3=False, bucket=None, list_objects=False):
        """
        Load binary content and filename.
        If s3=True and list_objects=False: treat path_pattern as exact key.
        If s3=True and list_objects=True: list by prefix, then pick first matching key.
        Else: treat path_pattern as glob pattern on local FS.
        Returns (content, filename) or Response on error.
        """
        if s3:
            try:
                client = get_s3_client()
            except (ClientError, NoCredentialsError, ParamValidationError):
                return Response(
                    {"detail": "There is a problem with credentials."},
                    status=status.HTTP_403_FORBIDDEN,
                )

            bucket_name = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET")
            key = output_location[len(f"s3://{bucket_name}/") :]
            if list_objects:
                # list keys under prefix then match suffix
                prefix = os.path.dirname(path_pattern)
                suffix = os.path.basename(path_pattern)
                try:
                    resp = client.list_objects_v2(Bucket=bucket, Prefix=prefix)
                except ClientError:
                    return Response(
                        {"detail": "Failed to list compliance files in S3."},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    )
                contents = resp.get("Contents", [])
                keys = [obj["Key"] for obj in contents if obj["Key"].endswith(suffix)]
                if not keys:
                    return Response(
                        {
                            "detail": f"No compliance file found for name '{os.path.splitext(suffix)[0]}'."
                        },
                        status=status.HTTP_404_NOT_FOUND,
                    )
                # path_pattern here is a prefix; the compliance action builds the correct suffix check beforehand
                key = keys[0]
            else:
                # path_pattern is an exact key
                key = path_pattern
            try:
                s3_object = s3_client.get_object(Bucket=bucket_name, Key=key)
                s3_obj = client.get_object(Bucket=bucket, Key=key)
            except ClientError as e:
                error_code = e.response.get("Error", {}).get("Code")
                if error_code == "NoSuchKey":
                code = e.response.get("Error", {}).get("Code")
                if code == "NoSuchKey":
                    return Response(
                        {"detail": "The scan has no reports."},
                        status=status.HTTP_404_NOT_FOUND,
@@ -1291,28 +1334,97 @@ class ScanViewSet(BaseRLSViewSet):
                        {"detail": "There is a problem with credentials."},
                        status=status.HTTP_403_FORBIDDEN,
                    )
            file_content = s3_object["Body"].read()
            filename = os.path.basename(output_location.split("/")[-1])
            content = s3_obj["Body"].read()
            filename = os.path.basename(key)
        else:
            zip_files = glob.glob(output_location)
            try:
                file_path = zip_files[0]
            except IndexError as e:
                sentry_sdk.capture_exception(e)
            files = glob.glob(path_pattern)
            if not files:
                return Response(
                    {"detail": "The scan has no reports."},
                    status=status.HTTP_404_NOT_FOUND,
                )
            with open(file_path, "rb") as f:
                file_content = f.read()
            filename = os.path.basename(file_path)
            filepath = files[0]
            with open(filepath, "rb") as f:
                content = f.read()
            filename = os.path.basename(filepath)

        response = HttpResponse(
            file_content, content_type="application/x-zip-compressed"
        )
        return content, filename

    def _serve_file(self, content, filename, content_type):
        response = HttpResponse(content, content_type=content_type)
        response["Content-Disposition"] = f'attachment; filename="{filename}"'

        return response

    @action(detail=True, methods=["get"], url_name="report")
    def report(self, request, pk=None):
        scan = self.get_object()
        # Check for executing tasks
        running_resp = self._get_task_status(scan)
        if running_resp:
            return running_resp

        if not scan.output_location:
            return Response(
                {"detail": "The scan has no reports."}, status=status.HTTP_404_NOT_FOUND
            )

        if scan.output_location.startswith("s3://"):
            bucket = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET", "")
            key_prefix = scan.output_location.removeprefix(f"s3://{bucket}/")
            loader = self._load_file(
                key_prefix, s3=True, bucket=bucket, list_objects=False
            )
        else:
            loader = self._load_file(scan.output_location, s3=False)

        if isinstance(loader, Response):
            return loader

        content, filename = loader
        return self._serve_file(content, filename, "application/x-zip-compressed")

    @action(
        detail=True,
        methods=["get"],
        url_path="compliance/(?P<name>[^/]+)",
        url_name="compliance",
    )
    def compliance(self, request, pk=None, name=None):
        scan = self.get_object()
        if name not in get_available_compliance_frameworks(scan.provider.provider):
            return Response(
                {"detail": f"Compliance '{name}' not found."},
                status=status.HTTP_404_NOT_FOUND,
            )

        running_resp = self._get_task_status(scan)
        if running_resp:
            return running_resp

        if not scan.output_location:
            return Response(
                {"detail": "The scan has no reports."}, status=status.HTTP_404_NOT_FOUND
            )

        if scan.output_location.startswith("s3://"):
            bucket = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET", "")
            key_prefix = scan.output_location.removeprefix(f"s3://{bucket}/")
            prefix = os.path.join(
                os.path.dirname(key_prefix), "compliance", f"{name}.csv"
            )
            loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
        else:
            base = os.path.dirname(scan.output_location)
            pattern = os.path.join(base, "compliance", f"*_{name}.csv")
            loader = self._load_file(pattern, s3=False)

        if isinstance(loader, Response):
            return loader

        content, filename = loader
        return self._serve_file(content, filename, "text/csv")

    def create(self, request, *args, **kwargs):
        input_serializer = self.get_serializer(data=request.data)
        input_serializer.is_valid(raise_exception=True)
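
From a client's perspective, the new compliance action behaves like the existing report download: a 202 while generation is running, a 200 with the file otherwise. A minimal sketch of consuming it, assuming an `/api/v1` prefix and a bearer token (neither is shown in this diff, and the scan ID below is hypothetical):

import requests

API = "https://prowler.example.com/api/v1"  # assumed base URL
scan_id = "3f6c1a2e-0000-0000-0000-000000000000"  # hypothetical

resp = requests.get(
    f"{API}/scans/{scan_id}/compliance/cis_1.4_aws",
    headers={"Authorization": "Bearer <token>"},
)
if resp.status_code == 202:
    # Report generation still running; poll the task referenced here.
    print("Task still executing:", resp.headers.get("Content-Location"))
elif resp.status_code == 200:
    with open("cis_1.4_aws.csv", "wb") as f:
        f.write(resp.content)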
@@ -39,6 +39,9 @@ IGNORED_EXCEPTIONS = [
    "RequestExpired",
    "ConnectionClosedError",
    "MaxRetryError",
    "AWSAccessKeyIDInvalidError",
    "AWSSessionTokenExpiredError",
    "EndpointConnectionError",  # AWS Service is not available in a region
    "Pool is closed",  # The following comes from urllib3: eu-west-1 -- HTTPClientError[126]: An HTTP Client raised an unhandled exception: AWSHTTPSConnectionPool(host='hostname.s3.eu-west-1.amazonaws.com', port=443): Pool is closed.
    # Authentication Errors from GCP
    "ClientAuthenticationError",
@@ -63,8 +66,6 @@ IGNORED_EXCEPTIONS = [
    "AzureClientIdAndClientSecretNotBelongingToTenantIdError",
    "AzureHTTPResponseError",
    "Error with credentials provided",
    # AWS Service is not available in a region
    "EndpointConnectionError",
]
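
This diff only edits the list itself; the hook that consults it is elsewhere in the codebase. A minimal sketch of how a list like IGNORED_EXCEPTIONS is typically applied in a Sentry before_send filter (the matching strategy here is an assumption, not the project's actual implementation):

import sentry_sdk

IGNORED_EXCEPTIONS = ["AWSSessionTokenExpiredError", "EndpointConnectionError"]

def before_send(event, hint):
    # Drop events whose exception type or message matches an ignored entry.
    if "exc_info" in hint:
        _, exc_value, _ = hint["exc_info"]
        if any(
            ignored == type(exc_value).__name__ or ignored in str(exc_value)
            for ignored in IGNORED_EXCEPTIONS
        ):
            return None  # suppress the event
    return event

sentry_sdk.init(dsn="", before_send=before_send)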
@@ -13,6 +13,39 @@ from prowler.config.config import (
    json_ocsf_file_suffix,
    output_file_timestamp,
)
from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected import (
    AWSWellArchitected,
)
from prowler.lib.outputs.compliance.cis.cis_aws import AWSCIS
from prowler.lib.outputs.compliance.cis.cis_azure import AzureCIS
from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
from prowler.lib.outputs.compliance.cis.cis_kubernetes import KubernetesCIS
from prowler.lib.outputs.compliance.cis.cis_m365 import M365CIS
from prowler.lib.outputs.compliance.ens.ens_aws import AWSENS
from prowler.lib.outputs.compliance.ens.ens_azure import AzureENS
from prowler.lib.outputs.compliance.ens.ens_gcp import GCPENS
from prowler.lib.outputs.compliance.iso27001.iso27001_aws import AWSISO27001
from prowler.lib.outputs.compliance.iso27001.iso27001_azure import AzureISO27001
from prowler.lib.outputs.compliance.iso27001.iso27001_gcp import GCPISO27001
from prowler.lib.outputs.compliance.iso27001.iso27001_kubernetes import (
    KubernetesISO27001,
)
from prowler.lib.outputs.compliance.iso27001.iso27001_nhn import NHNISO27001
from prowler.lib.outputs.compliance.kisa_ismsp.kisa_ismsp_aws import AWSKISAISMSP
from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_aws import AWSMitreAttack
from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_azure import (
    AzureMitreAttack,
)
from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_gcp import GCPMitreAttack
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_aws import (
    ProwlerThreatScoreAWS,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_azure import (
    ProwlerThreatScoreAzure,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_gcp import (
    ProwlerThreatScoreGCP,
)
from prowler.lib.outputs.csv.csv import CSV
from prowler.lib.outputs.html.html import HTML
from prowler.lib.outputs.ocsf.ocsf import OCSF
@@ -20,6 +53,46 @@ from prowler.lib.outputs.ocsf.ocsf import OCSF

logger = get_task_logger(__name__)


COMPLIANCE_CLASS_MAP = {
    "aws": [
        (lambda name: name.startswith("cis_"), AWSCIS),
        (lambda name: name == "mitre_attack_aws", AWSMitreAttack),
        (lambda name: name.startswith("ens_"), AWSENS),
        (
            lambda name: name.startswith("aws_well_architected_framework"),
            AWSWellArchitected,
        ),
        (lambda name: name.startswith("iso27001_"), AWSISO27001),
        (lambda name: name.startswith("kisa"), AWSKISAISMSP),
        (lambda name: name == "prowler_threatscore_aws", ProwlerThreatScoreAWS),
    ],
    "azure": [
        (lambda name: name.startswith("cis_"), AzureCIS),
        (lambda name: name == "mitre_attack_azure", AzureMitreAttack),
        (lambda name: name.startswith("ens_"), AzureENS),
        (lambda name: name.startswith("iso27001_"), AzureISO27001),
        (lambda name: name == "prowler_threatscore_azure", ProwlerThreatScoreAzure),
    ],
    "gcp": [
        (lambda name: name.startswith("cis_"), GCPCIS),
        (lambda name: name == "mitre_attack_gcp", GCPMitreAttack),
        (lambda name: name.startswith("ens_"), GCPENS),
        (lambda name: name.startswith("iso27001_"), GCPISO27001),
        (lambda name: name == "prowler_threatscore_gcp", ProwlerThreatScoreGCP),
    ],
    "kubernetes": [
        (lambda name: name.startswith("cis_"), KubernetesCIS),
        (lambda name: name.startswith("iso27001_"), KubernetesISO27001),
    ],
    "m365": [
        (lambda name: name.startswith("cis_"), M365CIS),
    ],
    "nhn": [
        (lambda name: name.startswith("iso27001_"), NHNISO27001),
    ],
}


# Predefined mapping for output formats and their configurations
OUTPUT_FORMATS_MAPPING = {
    "csv": {
@@ -43,13 +116,17 @@ def _compress_output_files(output_directory: str) -> str:
        str: The full path to the newly created ZIP archive.
    """
    zip_path = f"{output_directory}.zip"
    parent_dir = os.path.dirname(output_directory)
    zip_path_abs = os.path.abspath(zip_path)

    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
        for suffix in [config["suffix"] for config in OUTPUT_FORMATS_MAPPING.values()]:
            zipf.write(
                f"{output_directory}{suffix}",
                f"output/{output_directory.split('/')[-1]}{suffix}",
            )
        for foldername, _, filenames in os.walk(parent_dir):
            for filename in filenames:
                file_path = os.path.join(foldername, filename)
                if os.path.abspath(file_path) == zip_path_abs:
                    continue
                arcname = os.path.relpath(file_path, start=parent_dir)
                zipf.write(file_path, arcname)

    return zip_path

@@ -102,25 +179,38 @@ def _upload_to_s3(tenant_id: str, zip_path: str, scan_id: str) -> str:
    Raises:
        botocore.exceptions.ClientError: If the upload attempt to S3 fails for any reason.
    """
    if not base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET:
        return
    bucket = base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET
    if not bucket:
        return None

    try:
        s3 = get_s3_client()
        s3_key = f"{tenant_id}/{scan_id}/{os.path.basename(zip_path)}"

        # Upload the ZIP file (outputs) to the S3 bucket
        zip_key = f"{tenant_id}/{scan_id}/{os.path.basename(zip_path)}"
        s3.upload_file(
            Filename=zip_path,
            Bucket=base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET,
            Key=s3_key,
            Bucket=bucket,
            Key=zip_key,
        )
        return f"s3://{base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET}/{s3_key}"

        # Upload the compliance directory to the S3 bucket
        compliance_dir = os.path.join(os.path.dirname(zip_path), "compliance")
        for filename in os.listdir(compliance_dir):
            local_path = os.path.join(compliance_dir, filename)
            if not os.path.isfile(local_path):
                continue
            file_key = f"{tenant_id}/{scan_id}/compliance/{filename}"
            s3.upload_file(Filename=local_path, Bucket=bucket, Key=file_key)

        return f"s3://{base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET}/{zip_key}"
    except (ClientError, NoCredentialsError, ParamValidationError, ValueError) as e:
        logger.error(f"S3 upload failed: {str(e)}")


def _generate_output_directory(
    output_directory, prowler_provider: object, tenant_id: str, scan_id: str
) -> str:
) -> tuple[str, str]:
    """
    Generate a file system path for the output directory of a prowler scan.

@@ -145,7 +235,8 @@ def _generate_output_directory(

    Example:
        >>> _generate_output_directory("/tmp", "aws", "tenant-1234", "scan-5678")
        '/tmp/tenant-1234/aws/scan-5678/prowler-output-2023-02-15T12:34:56'
        '/tmp/tenant-1234/aws/scan-5678/prowler-output-2023-02-15T12:34:56',
        '/tmp/tenant-1234/aws/scan-5678/compliance/prowler-output-2023-02-15T12:34:56'
    """
    path = (
        f"{output_directory}/{tenant_id}/{scan_id}/prowler-output-"
@@ -153,4 +244,10 @@ def _generate_output_directory(
    )
    os.makedirs("/".join(path.split("/")[:-1]), exist_ok=True)

    return path
    compliance_path = (
        f"{output_directory}/{tenant_id}/{scan_id}/compliance/prowler-output-"
        f"{prowler_provider}-{output_file_timestamp}"
    )
    os.makedirs("/".join(compliance_path.split("/")[:-1]), exist_ok=True)

    return path, compliance_path
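
The COMPLIANCE_CLASS_MAP lookup pattern is worth calling out: each provider maps to an ordered list of (predicate, class) pairs, the first matching predicate wins, and GenericCompliance is the fallback. A self-contained illustration of the same pattern (class names here are placeholders standing in for the real output classes):

class AWSCIS: ...
class GenericCompliance: ...

COMPLIANCE_CLASS_MAP = {
    "aws": [
        (lambda name: name.startswith("cis_"), AWSCIS),
    ],
}

def resolve_writer(provider_type: str, framework: str):
    # First predicate that matches the framework name wins; otherwise
    # fall back to the generic compliance writer.
    return next(
        (
            cls
            for cond, cls in COMPLIANCE_CLASS_MAP.get(provider_type, [])
            if cond(framework)
        ),
        GenericCompliance,
    )

assert resolve_writer("aws", "cis_1.4_aws") is AWSCIS
assert resolve_writer("aws", "soc2_aws") is GenericCompliance
assert resolve_writer("gcp", "cis_2.0_gcp") is GenericCompliance  # unknown provider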
@@ -10,6 +10,7 @@ from django_celery_beat.models import PeriodicTask
from tasks.jobs.connection import check_provider_connection
from tasks.jobs.deletion import delete_provider, delete_tenant
from tasks.jobs.export import (
    COMPLIANCE_CLASS_MAP,
    OUTPUT_FORMATS_MAPPING,
    _compress_output_files,
    _generate_output_directory,
@@ -23,6 +24,9 @@ from api.decorators import set_tenant
from api.models import Finding, Provider, Scan, ScanSummary, StateChoices
from api.utils import initialize_prowler_provider
from api.v1.serializers import ScanTaskSerializer
from prowler.config.config import get_available_compliance_frameworks
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.generic.generic import GenericCompliance
from prowler.lib.outputs.finding import Finding as FindingOutput

logger = get_task_logger(__name__)
@@ -251,84 +255,107 @@ def generate_outputs(scan_id: str, provider_id: str, tenant_id: str):
        logger.info(f"No findings found for scan {scan_id}")
        return {"upload": False}

    # Initialize the prowler provider
    prowler_provider = initialize_prowler_provider(Provider.objects.get(id=provider_id))
    provider_obj = Provider.objects.get(id=provider_id)
    prowler_provider = initialize_prowler_provider(provider_obj)
    provider_uid = provider_obj.uid
    provider_type = provider_obj.provider

    # Get the provider UID
    provider_uid = Provider.objects.get(id=provider_id).uid

    # Generate and ensure the output directory exists
    output_directory = _generate_output_directory(
    frameworks_bulk = Compliance.get_bulk(provider_type)
    frameworks_avail = get_available_compliance_frameworks(provider_type)
    out_dir, comp_dir = _generate_output_directory(
        DJANGO_TMP_OUTPUT_DIRECTORY, provider_uid, tenant_id, scan_id
    )

    # Define auxiliary variables
    def get_writer(writer_map, name, factory, is_last):
        """
        Return existing writer_map[name] or create via factory().
        In both cases set `.close_file = is_last`.
        """
        initialization = False
        if name not in writer_map:
            writer_map[name] = factory()
            initialization = True
        w = writer_map[name]
        w.close_file = is_last

        return w, initialization

    output_writers = {}
    compliance_writers = {}

    scan_summary = FindingOutput._transform_findings_stats(
        ScanSummary.objects.filter(scan_id=scan_id)
    )

    # Retrieve findings queryset
    findings_qs = Finding.all_objects.filter(scan_id=scan_id).order_by("uid")
    qs = Finding.all_objects.filter(scan_id=scan_id).order_by("uid").iterator()
    for batch, is_last in batched(qs, DJANGO_FINDINGS_BATCH_SIZE):
        fos = [FindingOutput.transform_api_finding(f, prowler_provider) for f in batch]

    # Process findings in batches
    for batch, is_last_batch in batched(
        findings_qs.iterator(), DJANGO_FINDINGS_BATCH_SIZE
    ):
        finding_outputs = [
            FindingOutput.transform_api_finding(finding, prowler_provider)
            for finding in batch
        ]

        # Generate output files
        for mode, config in OUTPUT_FORMATS_MAPPING.items():
            kwargs = dict(config.get("kwargs", {}))
        # Outputs
        for mode, cfg in OUTPUT_FORMATS_MAPPING.items():
            cls = cfg["class"]
            suffix = cfg["suffix"]
            extra = cfg.get("kwargs", {}).copy()
            if mode == "html":
                kwargs["provider"] = prowler_provider
                kwargs["stats"] = scan_summary
                extra.update(provider=prowler_provider, stats=scan_summary)

            writer_class = config["class"]
            if writer_class in output_writers:
                writer = output_writers[writer_class]
                writer.transform(finding_outputs)
                writer.close_file = is_last_batch
            else:
                writer = writer_class(
                    findings=finding_outputs,
                    file_path=output_directory,
                    file_extension=config["suffix"],
            writer, initialization = get_writer(
                output_writers,
                cls,
                lambda cls=cls, fos=fos, suffix=suffix: cls(
                    findings=fos,
                    file_path=out_dir,
                    file_extension=suffix,
                    from_cli=False,
                )
                writer.close_file = is_last_batch
                output_writers[writer_class] = writer
                ),
                is_last,
            )
            if not initialization:
                writer.transform(fos)
            writer.batch_write_data_to_file(**extra)
            writer._data.clear()

            # Write the current batch using the writer
            writer.batch_write_data_to_file(**kwargs)
        # Compliance CSVs
        for name in frameworks_avail:
            compliance_obj = frameworks_bulk[name]
            klass = next(
                (
                    c
                    for cond, c in COMPLIANCE_CLASS_MAP.get(provider_type, [])
                    if cond(name)
                ),
                GenericCompliance,
            )
            filename = f"{comp_dir}_{name}.csv"

            # TODO: Refactor the output classes to avoid this manual reset
            writer._data = []
            writer, initialization = get_writer(
                compliance_writers,
                name,
                lambda klass=klass, fos=fos: klass(
                    findings=fos,
                    compliance=compliance_obj,
                    file_path=filename,
                    from_cli=False,
                ),
                is_last,
            )
            if not initialization:
                writer.transform(fos, compliance_obj, name)
            writer.batch_write_data_to_file()
            writer._data.clear()

    # Compress output files
    output_directory = _compress_output_files(output_directory)
    compressed = _compress_output_files(out_dir)
    upload_uri = _upload_to_s3(tenant_id, compressed, scan_id)

    # Save to configured storage
    uploaded = _upload_to_s3(tenant_id, output_directory, scan_id)

    if uploaded:
        # Remove the local files after upload
    if upload_uri:
        try:
            rmtree(Path(output_directory).parent, ignore_errors=True)
        except FileNotFoundError as e:
            rmtree(Path(compressed).parent, ignore_errors=True)
        except Exception as e:
            logger.error(f"Error deleting output files: {e}")

        output_directory = uploaded
        uploaded = True
        final_location, did_upload = upload_uri, True
    else:
        uploaded = False
        final_location, did_upload = compressed, False

    # Update the scan instance with the output path
    Scan.all_objects.filter(id=scan_id).update(output_location=output_directory)

    logger.info(f"Scan output files generated, output location: {output_directory}")

    return {"upload": uploaded}
    Scan.all_objects.filter(id=scan_id).update(output_location=final_location)
    logger.info(f"Scan outputs at {final_location}")
    return {"upload": did_upload}
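
generate_outputs() iterates with `for batch, is_last in batched(qs, DJANGO_FINDINGS_BATCH_SIZE)`, i.e. a batching helper that also flags the final batch so writers know when to close their files. That helper's implementation is not part of this diff; a minimal sketch of what such a generator could look like:

from itertools import islice
from typing import Iterable, Iterator, Tuple

def batched(iterable: Iterable, size: int) -> Iterator[Tuple[list, bool]]:
    """Yield (batch, is_last) pairs; is_last is True only for the final batch.

    A sketch of the contract generate_outputs() relies on; the project's
    real helper lives elsewhere and may differ in detail.
    """
    it = iter(iterable)
    batch = list(islice(it, size))
    while batch:
        # Peek at the next batch to know whether the current one is last.
        next_batch = list(islice(it, size))
        yield batch, not next_batch
        batch = next_batch

# e.g. list(batched(range(5), 2)) == [([0, 1], False), ([2, 3], False), ([4], True)]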
@@ -0,0 +1,156 @@
#!/usr/bin/env python3
import argparse
import re
import subprocess
import sys
from pathlib import Path

import matplotlib.pyplot as plt
import pandas as pd

plt.style.use("ggplot")


def run_locust(
    locust_file: str,
    host: str,
    users: int,
    hatch_rate: int,
    run_time: str,
    csv_prefix: Path,
) -> Path:
    artifacts_dir = Path("artifacts")
    artifacts_dir.mkdir(parents=True, exist_ok=True)

    cmd = [
        "locust",
        "-f",
        f"scenarios/{locust_file}",
        "--headless",
        "-u",
        str(users),
        "-r",
        str(hatch_rate),
        "-t",
        run_time,
        "--host",
        host,
        "--csv",
        str(artifacts_dir / csv_prefix.name),
    ]
    print(f"Running Locust: {' '.join(cmd)}")
    process = subprocess.run(cmd)
    if process.returncode:
        sys.exit("Locust execution failed")

    stats_file = artifacts_dir / f"{csv_prefix.stem}_stats.csv"
    if not stats_file.exists():
        sys.exit(f"Stats CSV not found: {stats_file}")
    return stats_file


def load_percentiles(csv_path: Path) -> pd.DataFrame:
    df = pd.read_csv(csv_path)
    mapping = {"50%": "p50", "75%": "p75", "90%": "p90", "95%": "p95"}
    available = [col for col in mapping if col in df.columns]
    renamed = {col: mapping[col] for col in available}
    df = df.rename(columns=renamed).set_index("Name")[renamed.values()]
    return df.drop(index=["Aggregated"], errors="ignore")


def sanitize_label(label: str) -> str:
    text = re.sub(r"[^\w]+", "_", label.strip().lower())
    return text.strip("_")


def plot_multi_comparison(metrics: dict[str, pd.DataFrame]) -> None:
    common = sorted(set.intersection(*(set(df.index) for df in metrics.values())))
    percentiles = list(next(iter(metrics.values())).columns)
    groups = len(metrics)
    width = 0.8 / groups

    for endpoint in common:
        fig, ax = plt.subplots(figsize=(10, 5), dpi=100)
        for idx, (label, df) in enumerate(metrics.items()):
            series = df.loc[endpoint]
            positions = [
                i + (idx - groups / 2) * width + width / 2
                for i in range(len(percentiles))
            ]
            bars = ax.bar(positions, series.values, width, label=label)
            for bar in bars:
                height = bar.get_height()
                ax.annotate(
                    f"{int(height)}",
                    xy=(bar.get_x() + bar.get_width() / 2, height),
                    xytext=(0, 3),
                    textcoords="offset points",
                    ha="center",
                    va="bottom",
                    fontsize=8,
                )

        ax.set_xticks(range(len(percentiles)))
        ax.set_xticklabels(percentiles)
        ax.set_ylabel("Latency (ms)")
        ax.set_title(endpoint, fontsize=12)
        ax.grid(True, axis="y", linestyle="--", alpha=0.7)

        fig.tight_layout()
        fig.subplots_adjust(right=0.75)
        ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), framealpha=0.9)

        output = Path("artifacts") / f"comparison_{sanitize_label(endpoint)}.png"
        plt.savefig(output)
        plt.close(fig)
        print(f"Saved chart: {output}")


def main() -> None:
    parser = argparse.ArgumentParser(description="Run Locust and compare metrics")
    parser.add_argument("--locustfile", required=True, help="Locust file in scenarios/")
    parser.add_argument("--host", required=True, help="Target host URL")
    parser.add_argument(
        "--users", type=int, default=10, help="Number of simulated users"
    )
    parser.add_argument("--rate", type=int, default=1, help="Hatch rate per second")
    parser.add_argument("--time", default="1m", help="Test duration (e.g. 30s, 1m)")
    parser.add_argument(
        "--metrics-dir", default="baselines", help="Directory with CSV baselines"
    )
    parser.add_argument("--version", default="current", help="Test version")
    args = parser.parse_args()

    metrics_dir = Path(args.metrics_dir)
    if not metrics_dir.is_dir():
        sys.exit(f"Metrics directory not found: {metrics_dir}")

    metrics_data: dict[str, pd.DataFrame] = {}
    for csv_file in sorted(metrics_dir.glob("*.csv")):
        metrics_data[csv_file.stem] = load_percentiles(csv_file)

    current_prefix = Path(args.version)
    current_csv = run_locust(
        locust_file=args.locustfile,
        host=args.host,
        users=args.users,
        hatch_rate=args.rate,
        run_time=args.time,
        csv_prefix=current_prefix,
    )
    metrics_data[args.version] = load_percentiles(current_csv)

    for endpoint in sorted(
        set.intersection(*(set(df.index) for df in metrics_data.values()))
    ):
        parts = [endpoint]
        for label, df in metrics_data.items():
            s = df.loc[endpoint]
            parts.append(f"{label}: p50 {s.p50}, p75 {s.p75}, p90 {s.p90}, p95 {s.p95}")
        print(" | ".join(parts))

    plot_multi_comparison(metrics_data)


if __name__ == "__main__":
    main()
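
For a feel of the data flow: load_percentiles() turns a Locust stats CSV into one row per endpoint with p50/p75/p90/p95 latency columns. A small usage sketch, assuming the script above is importable and a baseline CSV from an earlier run exists (the file name below is hypothetical):

from pathlib import Path

# load_percentiles() is defined in the script above; the baseline path
# is an assumption for illustration.
baseline = load_percentiles(Path("baselines/v1.6.0_stats.csv"))
print(baseline.loc["/findings", ["p50", "p95"]])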
@@ -0,0 +1,2 @@
locust==2.34.1
matplotlib==3.10.1
@@ -0,0 +1,202 @@
from locust import events, task
from utils.config import (
    FINDINGS_UI_SORT_VALUES,
    L_PROVIDER_NAME,
    M_PROVIDER_NAME,
    S_PROVIDER_NAME,
    TARGET_INSERTED_AT,
)
from utils.helpers import (
    APIUserBase,
    get_api_token,
    get_auth_headers,
    get_next_resource_filter,
    get_resource_filters_pairs,
    get_scan_id_from_provider_name,
    get_sort_value,
)

GLOBAL = {
    "token": None,
    "scan_ids": {},
    "resource_filters": None,
    "large_resource_filters": None,
}


@events.test_start.add_listener
def on_test_start(environment, **kwargs):
    GLOBAL["token"] = get_api_token(environment.host)

    GLOBAL["scan_ids"]["small"] = get_scan_id_from_provider_name(
        environment.host, GLOBAL["token"], S_PROVIDER_NAME
    )
    GLOBAL["scan_ids"]["medium"] = get_scan_id_from_provider_name(
        environment.host, GLOBAL["token"], M_PROVIDER_NAME
    )
    GLOBAL["scan_ids"]["large"] = get_scan_id_from_provider_name(
        environment.host, GLOBAL["token"], L_PROVIDER_NAME
    )

    GLOBAL["resource_filters"] = get_resource_filters_pairs(
        environment.host, GLOBAL["token"]
    )
    GLOBAL["large_resource_filters"] = get_resource_filters_pairs(
        environment.host, GLOBAL["token"], GLOBAL["scan_ids"]["large"]
    )


class APIUser(APIUserBase):
    def on_start(self):
        self.token = GLOBAL["token"]
        self.s_scan_id = GLOBAL["scan_ids"]["small"]
        self.m_scan_id = GLOBAL["scan_ids"]["medium"]
        self.l_scan_id = GLOBAL["scan_ids"]["large"]
        self.available_resource_filters = GLOBAL["resource_filters"]
        self.available_resource_filters_large_scan = GLOBAL["large_resource_filters"]

    @task
    def findings_default(self):
        name = "/findings"
        page_number = self._next_page(name)
        endpoint = (
            f"/findings?page[number]={page_number}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
            f"&filter[inserted_at]={TARGET_INSERTED_AT}"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task(3)
    def findings_default_include(self):
        name = "/findings?include"
        page = self._next_page(name)
        endpoint = (
            f"/findings?page[number]={page}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
            f"&filter[inserted_at]={TARGET_INSERTED_AT}"
            f"&include=scan.provider,resources"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task(3)
    def findings_metadata(self):
        endpoint = f"/findings/metadata?" f"filter[inserted_at]={TARGET_INSERTED_AT}"
        self.client.get(
            endpoint, headers=get_auth_headers(self.token), name="/findings/metadata"
        )

    @task
    def findings_scan_small(self):
        name = "/findings?filter[scan_id] - 50k"
        page_number = self._next_page(name)
        endpoint = (
            f"/findings?page[number]={page_number}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
            f"&filter[scan]={self.s_scan_id}"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task
    def findings_metadata_scan_small(self):
        endpoint = f"/findings/metadata?" f"&filter[scan]={self.s_scan_id}"
        self.client.get(
            endpoint,
            headers=get_auth_headers(self.token),
            name="/findings/metadata?filter[scan_id] - 50k",
        )

    @task(2)
    def findings_scan_medium(self):
        name = "/findings?filter[scan_id] - 250k"
        page_number = self._next_page(name)
        endpoint = (
            f"/findings?page[number]={page_number}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
            f"&filter[scan]={self.m_scan_id}"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task
    def findings_metadata_scan_medium(self):
        endpoint = f"/findings/metadata?" f"&filter[scan]={self.m_scan_id}"
        self.client.get(
            endpoint,
            headers=get_auth_headers(self.token),
            name="/findings/metadata?filter[scan_id] - 250k",
        )

    @task
    def findings_scan_large(self):
        name = "/findings?filter[scan_id] - 500k"
        page_number = self._next_page(name)
        endpoint = (
            f"/findings?page[number]={page_number}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
            f"&filter[scan]={self.l_scan_id}"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task
    def findings_scan_large_include(self):
        name = "/findings?filter[scan_id]&include - 500k"
        page_number = self._next_page(name)
        endpoint = (
            f"/findings?page[number]={page_number}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
            f"&filter[scan]={self.l_scan_id}"
            f"&include=scan.provider,resources"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task
    def findings_metadata_scan_large(self):
        endpoint = f"/findings/metadata?" f"&filter[scan]={self.l_scan_id}"
        self.client.get(
            endpoint,
            headers=get_auth_headers(self.token),
            name="/findings/metadata?filter[scan_id] - 500k",
        )

    @task(2)
    def findings_resource_filter(self):
        name = "/findings?filter[resource_filter]&include"
        filter_name, filter_value = get_next_resource_filter(
            self.available_resource_filters
        )

        endpoint = (
            f"/findings?filter[{filter_name}]={filter_value}"
            f"&filter[inserted_at]={TARGET_INSERTED_AT}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
            f"&include=scan.provider,resources"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task(3)
    def findings_metadata_resource_filter(self):
        name = "/findings/metadata?filter[resource_filter]"
        filter_name, filter_value = get_next_resource_filter(
            self.available_resource_filters
        )

        endpoint = (
            f"/findings?filter[{filter_name}]={filter_value}"
            f"&filter[inserted_at]={TARGET_INSERTED_AT}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task
    def findings_resource_filter_large_scan_include(self):
        name = "/findings?filter[resource_filter][scan]&include - 500k"
        filter_name, filter_value = get_next_resource_filter(
            self.available_resource_filters
        )

        endpoint = (
            f"/findings?filter[{filter_name}]={filter_value}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
            f"&filter[scan]={self.l_scan_id}"
            f"&include=scan.provider,resources"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)
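
Locust picks tasks in proportion to their @task weights, so the scenario's intended traffic mix follows directly from the decorators above. A quick way to sanity-check that mix (the weights below are read off the scenario; the arithmetic itself is the only addition):

# Each task's share of requests is weight / sum(weights).
weights = {
    "findings_default": 1,
    "findings_default_include": 3,
    "findings_metadata": 3,
    "findings_scan_small": 1,
    "findings_metadata_scan_small": 1,
    "findings_scan_medium": 2,
    "findings_metadata_scan_medium": 1,
    "findings_scan_large": 1,
    "findings_scan_large_include": 1,
    "findings_metadata_scan_large": 1,
    "findings_resource_filter": 2,
    "findings_metadata_resource_filter": 3,
    "findings_resource_filter_large_scan_include": 1,
}
total = sum(weights.values())  # 21
for task_name, w in weights.items():
    print(f"{task_name}: {w / total:.1%} of requests")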
@@ -0,0 +1,19 @@
import os

USER_EMAIL = os.environ.get("USER_EMAIL")
USER_PASSWORD = os.environ.get("USER_PASSWORD")

BASE_HEADERS = {"Content-Type": "application/vnd.api+json"}

FINDINGS_UI_SORT_VALUES = ["severity", "status", "-inserted_at"]
TARGET_INSERTED_AT = os.environ.get("TARGET_INSERTED_AT", "2025-04-22")

FINDINGS_RESOURCE_METADATA = {
    "regions": "region",
    "resource_types": "resource_type",
    "services": "service",
}

S_PROVIDER_NAME = "provider-50k"
M_PROVIDER_NAME = "provider-250k"
L_PROVIDER_NAME = "provider-500k"
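
Since USER_EMAIL and USER_PASSWORD come from the environment with no default, a run started without them fails only once authentication is attempted. A small pre-flight check one might run before launching the scenario (a sketch, not part of this diff):

import os

REQUIRED = ("USER_EMAIL", "USER_PASSWORD")
missing = [var for var in REQUIRED if not os.environ.get(var)]
if missing:
    # Fail fast instead of hitting a confusing mid-test auth error.
    raise SystemExit(f"Missing required environment variables: {', '.join(missing)}")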
@@ -0,0 +1,168 @@
import random
from collections import defaultdict
from threading import Lock

import requests
from locust import HttpUser, between
from utils.config import (
    BASE_HEADERS,
    FINDINGS_RESOURCE_METADATA,
    TARGET_INSERTED_AT,
    USER_EMAIL,
    USER_PASSWORD,
)

_global_page_counters = defaultdict(int)
_page_lock = Lock()


class APIUserBase(HttpUser):
    """
    Base class for API user simulation in Locust performance tests.

    Attributes:
        abstract (bool): Indicates this is an abstract user class.
        wait_time: Time between task executions, randomized between 1 and 5 seconds.
    """

    abstract = True
    wait_time = between(1, 5)

    def _next_page(self, endpoint_name: str) -> int:
        """
        Returns the next page number for a given endpoint. Thread-safe.

        Args:
            endpoint_name (str): Name of the API endpoint being paginated.

        Returns:
            int: The next page number for the given endpoint.
        """
        with _page_lock:
            _global_page_counters[endpoint_name] += 1
            return _global_page_counters[endpoint_name]


def get_next_resource_filter(available_values: dict) -> tuple:
    """
    Randomly selects a filter type and value from available options.

    Args:
        available_values (dict): Dictionary with filter types as keys and list of possible values.

    Returns:
        tuple: A (filter_type, filter_value) pair randomly selected.
    """
    filter_type = random.choice(list(available_values.keys()))
    filter_value = random.choice(available_values[filter_type])
    return filter_type, filter_value


def get_auth_headers(token: str) -> dict:
    """
    Returns the headers for the API requests.

    Args:
        token (str): The token to be included in the headers.

    Returns:
        dict: The headers for the API requests.
    """
    return {
        "Authorization": f"Bearer {token}",
        **BASE_HEADERS,
    }


def get_api_token(host: str) -> str:
    """
    Authenticates with the API and retrieves a bearer token.

    Args:
        host (str): The host URL of the API.

    Returns:
        str: The access token for authenticated requests.

    Raises:
        AssertionError: If the request fails or does not return a 200 status code.
    """
    login_payload = {
        "data": {
            "type": "tokens",
            "attributes": {"email": USER_EMAIL, "password": USER_PASSWORD},
        }
    }
    response = requests.post(f"{host}/tokens", json=login_payload, headers=BASE_HEADERS)
    assert response.status_code == 200, f"Failed to get token: {response.text}"
    return response.json()["data"]["attributes"]["access"]


def get_scan_id_from_provider_name(host: str, token: str, provider_name: str) -> str:
    """
    Retrieves the scan ID associated with a specific provider name.

    Args:
        host (str): The host URL of the API.
        token (str): Bearer token for authentication.
        provider_name (str): Name of the provider to filter scans by.

    Returns:
        str: The ID of the scan.

    Raises:
        AssertionError: If the request fails or does not return a 200 status code.
    """
    response = requests.get(
        f"{host}/scans?fields[scans]=id&filter[provider_alias]={provider_name}",
        headers=get_auth_headers(token),
    )
    assert response.status_code == 200, f"Failed to get scan: {response.text}"
    return response.json()["data"][0]["id"]


def get_resource_filters_pairs(host: str, token: str, scan_id: str = "") -> dict:
    """
    Retrieves and maps resource metadata filter values from the findings endpoint.

    Args:
        host (str): The host URL of the API.
        token (str): Bearer token for authentication.
        scan_id (str, optional): Optional scan ID to filter metadata. Defaults to using inserted_at timestamp.

    Returns:
        dict: A dictionary of resource filter metadata.

    Raises:
        AssertionError: If the request fails or does not return a 200 status code.
    """
    metadata_filters = (
        f"filter[scan]={scan_id}"
        if scan_id
        else f"filter[inserted_at]={TARGET_INSERTED_AT}"
    )
    response = requests.get(
        f"{host}/findings/metadata?{metadata_filters}", headers=get_auth_headers(token)
    )
    assert (
        response.status_code == 200
    ), f"Failed to get resource filters values: {response.text}"
    attributes = response.json()["data"]["attributes"]
    return {
        FINDINGS_RESOURCE_METADATA[key]: values
        for key, values in attributes.items()
        if key in FINDINGS_RESOURCE_METADATA.keys()
    }


def get_sort_value(sort_values: list) -> str:
    """
    Constructs a sort query string from a list of sort keys.

    Args:
        sort_values (list): The list of sort values to include in the query.

    Returns:
        str: A formatted sort query string (e.g., "sort=created_at,-severity").
    """
    return f"sort={','.join(sort_values)}"
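
These helpers also compose outside Locust, which is handy for debugging scenario setup. A sketch of the same flow the test_start listener performs, assuming the helpers module is importable (the host URL is hypothetical):

host = "http://localhost:8080/api/v1"  # assumed target
token = get_api_token(host)
scan_id = get_scan_id_from_provider_name(host, token, "provider-50k")
filters = get_resource_filters_pairs(host, token, scan_id)
filter_name, filter_value = get_next_resource_filter(filters)
print(f"GET /findings?filter[{filter_name}]={filter_value}&filter[scan]={scan_id}")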
@@ -1,6 +1,9 @@
#!/bin/bash
# Run Prowler against All AWS Accounts in an AWS Organization

# Activate Poetry Environment
eval "$(poetry env activate)"

# Show Prowler Version
prowler -v
@@ -399,7 +399,6 @@ mainConfig:
      [
        "RSA-1024",
        "P-192",
        "SHA-1",
      ]

# AWS EKS Configuration
@@ -16,7 +16,6 @@ spec:
      containers:
        - name: prowler
          image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
          command: ["prowler"]
          args: ["kubernetes", "-z", "-b"]
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          volumeMounts:
+345
-2
@@ -2228,13 +2228,356 @@ def get_section_containers_ens(data, section_1, section_2, section_3, section_4)
|
||||
return html.Div(section_containers, className="compliance-data-layout")
|
||||
|
||||
|
||||
def get_section_containers_3_levels(data, section_1, section_2, section_3):
|
||||
data["STATUS"] = data["STATUS"].apply(map_status_to_icon)
|
||||
findings_counts_marco = (
|
||||
data.groupby([section_1, "STATUS"]).size().unstack(fill_value=0)
|
||||
)
|
||||
section_containers = []
|
||||
data[section_1] = data[section_1].astype(str)
|
||||
data[section_2] = data[section_2].astype(str)
|
||||
data[section_3] = data[section_3].astype(str)
|
||||
|
||||
data.sort_values(
|
||||
by=section_3,
|
||||
key=lambda x: x.map(extract_numeric_values),
|
||||
ascending=True,
|
||||
inplace=True,
|
||||
)
|
||||
|
||||
for marco in data[section_1].unique():
|
||||
success_marco = findings_counts_marco.loc[marco].get(pass_emoji, 0)
|
||||
failed_marco = findings_counts_marco.loc[marco].get(fail_emoji, 0)
|
||||
|
||||
fig_name = go.Figure(
|
||||
[
|
||||
go.Bar(
|
||||
name="Failed",
|
||||
x=[failed_marco],
|
||||
y=[""],
|
||||
orientation="h",
|
||||
marker=dict(color="#e77676"),
|
||||
width=[0.8],
|
||||
),
|
||||
go.Bar(
|
||||
name="Success",
|
||||
x=[success_marco],
|
||||
y=[""],
|
||||
orientation="h",
|
||||
marker=dict(color="#45cc6e"),
|
||||
width=[0.8],
|
||||
),
|
||||
]
|
||||
)
|
||||
fig_name.update_layout(
|
||||
barmode="stack",
|
||||
margin=dict(l=10, r=10, t=10, b=10),
|
||||
paper_bgcolor="rgba(0,0,0,0)",
|
||||
plot_bgcolor="rgba(0,0,0,0)",
|
||||
showlegend=False,
|
||||
width=350,
|
||||
height=30,
|
||||
xaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
|
||||
yaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
|
||||
annotations=[
|
||||
dict(
|
||||
x=success_marco + failed_marco,
|
||||
y=0,
|
||||
xref="x",
|
||||
yref="y",
|
||||
text=str(success_marco),
|
||||
showarrow=False,
|
||||
font=dict(color="#45cc6e", size=14),
|
||||
xanchor="left",
|
||||
yanchor="middle",
|
||||
),
|
||||
dict(
|
||||
x=0,
|
||||
y=0,
|
||||
xref="x",
|
||||
yref="y",
|
||||
text=str(failed_marco),
|
||||
showarrow=False,
|
||||
font=dict(color="#e77676", size=14),
|
||||
xanchor="right",
|
||||
yanchor="middle",
|
||||
),
|
||||
],
|
||||
)
|
||||
fig_name.add_annotation(
|
||||
x=failed_marco,
|
||||
y=0.3,
|
||||
text="|",
|
||||
showarrow=False,
|
||||
font=dict(size=20),
|
||||
xanchor="center",
|
||||
yanchor="middle",
|
||||
)
|
||||
|
||||
graph_div = html.Div(
|
||||
dcc.Graph(
|
||||
figure=fig_name, config={"staticPlot": True}, className="info-bar"
|
||||
),
|
||||
className="graph-section",
|
||||
)
|
||||
direct_internal_items = []
|
||||
|
||||
for categoria in data[data[section_1] == marco][section_2].unique():
|
||||
specific_data = data[
|
||||
(data[section_1] == marco) & (data[section_2] == categoria)
|
||||
]
|
||||
findings_counts_categoria = (
|
||||
specific_data.groupby([section_2, "STATUS"])
|
||||
.size()
|
||||
.unstack(fill_value=0)
|
||||
)
|
||||
success_categoria = findings_counts_categoria.loc[categoria].get(
|
||||
pass_emoji, 0
|
||||
)
|
||||
failed_categoria = findings_counts_categoria.loc[categoria].get(
|
||||
fail_emoji, 0
|
||||
)
|
||||
|
||||
fig_section = go.Figure(
|
||||
[
|
||||
go.Bar(
|
||||
name="Failed",
|
||||
x=[failed_categoria],
|
||||
y=[""],
|
||||
orientation="h",
|
||||
marker=dict(color="#e77676"),
|
||||
width=[0.8],
|
||||
),
|
||||
go.Bar(
|
||||
name="Success",
|
||||
x=[success_categoria],
|
||||
y=[""],
|
||||
orientation="h",
|
||||
marker=dict(color="#45cc6e"),
|
||||
width=[0.8],
|
||||
),
|
||||
]
|
||||
)
|
||||
fig_section.update_layout(
|
||||
barmode="stack",
|
||||
margin=dict(l=10, r=10, t=10, b=10),
|
||||
paper_bgcolor="rgba(0,0,0,0)",
|
||||
plot_bgcolor="rgba(0,0,0,0)",
|
||||
showlegend=False,
|
||||
width=350,
|
||||
height=30,
|
||||
xaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
|
                yaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                annotations=[
                    dict(
                        x=success_categoria + failed_categoria,
                        y=0,
                        xref="x",
                        yref="y",
                        text=str(success_categoria),
                        showarrow=False,
                        font=dict(color="#45cc6e", size=14),
                        xanchor="left",
                        yanchor="middle",
                    ),
                    dict(
                        x=0,
                        y=0,
                        xref="x",
                        yref="y",
                        text=str(failed_categoria),
                        showarrow=False,
                        font=dict(color="#e77676", size=14),
                        xanchor="right",
                        yanchor="middle",
                    ),
                ],
            )
            fig_section.add_annotation(
                x=failed_categoria,
                y=0.3,
                text="|",
                showarrow=False,
                font=dict(size=20),
                xanchor="center",
                yanchor="middle",
            )

            graph_div_section = html.Div(
                dcc.Graph(
                    figure=fig_section,
                    config={"staticPlot": True},
                    className="info-bar-child",
                ),
                className="graph-section-req",
            )
            direct_internal_items_idgrupocontrol = []

            for idgrupocontrol in specific_data[section_3].unique():
                specific_data2 = specific_data[
                    specific_data[section_3] == idgrupocontrol
                ]
                findings_counts_idgrupocontrol = (
                    specific_data2.groupby([section_3, "STATUS"])
                    .size()
                    .unstack(fill_value=0)
                )
                success_idgrupocontrol = findings_counts_idgrupocontrol.loc[
                    idgrupocontrol
                ].get(pass_emoji, 0)
                failed_idgrupocontrol = findings_counts_idgrupocontrol.loc[
                    idgrupocontrol
                ].get(fail_emoji, 0)

                fig_idgrupocontrol = go.Figure(
                    [
                        go.Bar(
                            name="Failed",
                            x=[failed_idgrupocontrol],
                            y=[""],
                            orientation="h",
                            marker=dict(color="#e77676"),
                            width=[0.8],
                        ),
                        go.Bar(
                            name="Success",
                            x=[success_idgrupocontrol],
                            y=[""],
                            orientation="h",
                            marker=dict(color="#45cc6e"),
                            width=[0.8],
                        ),
                    ]
                )
                fig_idgrupocontrol.update_layout(
                    barmode="stack",
                    margin=dict(l=10, r=10, t=10, b=10),
                    paper_bgcolor="rgba(0,0,0,0)",
                    plot_bgcolor="rgba(0,0,0,0)",
                    showlegend=False,
                    width=350,
                    height=30,
                    xaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                    yaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                    annotations=[
                        dict(
                            x=success_idgrupocontrol + failed_idgrupocontrol,
                            y=0,
                            xref="x",
                            yref="y",
                            text=str(success_idgrupocontrol),
                            showarrow=False,
                            font=dict(color="#45cc6e", size=14),
                            xanchor="left",
                            yanchor="middle",
                        ),
                        dict(
                            x=0,
                            y=0,
                            xref="x",
                            yref="y",
                            text=str(failed_idgrupocontrol),
                            showarrow=False,
                            font=dict(color="#e77676", size=14),
                            xanchor="right",
                            yanchor="middle",
                        ),
                    ],
                )
                fig_idgrupocontrol.add_annotation(
                    x=failed_idgrupocontrol,
                    y=0.3,
                    text="|",
                    showarrow=False,
                    font=dict(size=20),
                    xanchor="center",
                    yanchor="middle",
                )

                graph_div_idgrupocontrol = html.Div(
                    dcc.Graph(
                        figure=fig_idgrupocontrol,
                        config={"staticPlot": True},
                        className="info-bar-child",
                    ),
                    className="graph-section-req",
                )

                data_table = dash_table.DataTable(
                    data=specific_data2.to_dict("records"),
                    columns=[
                        {"name": i, "id": i}
                        for i in [
                            "CHECKID",
                            "STATUS",
                            "REGION",
                            "ACCOUNTID",
                            "RESOURCEID",
                        ]
                    ],
                    style_table={"overflowX": "auto"},
                    style_as_list_view=True,
                    style_cell={"textAlign": "left", "padding": "5px"},
                )

                internal_accordion_item_2 = dbc.AccordionItem(
                    title=idgrupocontrol,
                    children=[
                        graph_div_idgrupocontrol,
                        html.Div([data_table], className="inner-accordion-content"),
                    ],
                )
                direct_internal_items_idgrupocontrol.append(
                    html.Div(
                        [
                            graph_div_idgrupocontrol,
                            dbc.Accordion(
                                [internal_accordion_item_2],
                                start_collapsed=True,
                                flush=True,
                            ),
                        ],
                        className="accordion-inner--child",
                    )
                )

            internal_accordion_item = dbc.AccordionItem(
                title=categoria,
                children=direct_internal_items_idgrupocontrol,
            )
            internal_section_container = html.Div(
                [
                    graph_div_section,
                    dbc.Accordion(
                        [internal_accordion_item], start_collapsed=True, flush=True
                    ),
                ],
                className="accordion-inner--child",
            )
            direct_internal_items.append(internal_section_container)

        accordion_item = dbc.AccordionItem(title=marco, children=direct_internal_items)
        section_container = html.Div(
            [
                graph_div,
                dbc.Accordion([accordion_item], start_collapsed=True, flush=True),
            ],
            className="accordion-inner",
        )
        section_containers.append(section_container)

    return html.Div(section_containers, className="compliance-data-layout")

# This function extracts and compares up to three numeric values, ensuring
# correct sorting for version-like strings.
def extract_numeric_values(value):
    numbers = re.findall(r"\d+", str(value))
    if len(numbers) == 3:
        return int(numbers[0]), int(numbers[1]), int(numbers[2])
    elif len(numbers) == 2:
        return int(numbers[0]), int(numbers[1])
    elif len(numbers) == 1:
        return int(numbers[0]), 0
    return 0, 0

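As a quick illustration (not part of the diff; the sample IDs are invented), the helper above is meant to be used as a sort key so that version-like requirement IDs order numerically rather than lexicographically:

```python
# Plain string sorting would put "1.10" before "1.2"; keying on
# extract_numeric_values compares the extracted integer tuples instead.
ids = ["1.10", "1.2", "2.1.3", "10"]
print(sorted(ids, key=extract_numeric_values))
# -> ['1.2', '1.10', '2.1.3', '10']
```
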
@@ -1,6 +1,6 @@
 import warnings

-from dashboard.common_methods import get_section_containers_format2
+from dashboard.common_methods import get_section_containers_3_levels

 warnings.filterwarnings("ignore")
@@ -10,6 +10,7 @@ def get_table(data):
         [
             "REQUIREMENTS_ATTRIBUTES_NAME",
             "REQUIREMENTS_ATTRIBUTES_SECTION",
+            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
             "CHECKID",
             "STATUS",
             "REGION",
@@ -17,6 +18,10 @@ def get_table(data):
             "RESOURCEID",
         ]
     ]
-    return get_section_containers_format2(
-        aux, "REQUIREMENTS_ATTRIBUTES_NAME", "REQUIREMENTS_ATTRIBUTES_SECTION"
+
+    return get_section_containers_3_levels(
+        aux,
+        "REQUIREMENTS_ATTRIBUTES_SECTION",
+        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
+        "REQUIREMENTS_ATTRIBUTES_NAME",
     )
@@ -1,6 +1,6 @@
 import warnings

-from dashboard.common_methods import get_section_containers_format2
+from dashboard.common_methods import get_section_containers_3_levels

 warnings.filterwarnings("ignore")
@@ -10,6 +10,7 @@ def get_table(data):
         [
             "REQUIREMENTS_ATTRIBUTES_NAME",
             "REQUIREMENTS_ATTRIBUTES_SECTION",
+            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
             "CHECKID",
             "STATUS",
             "REGION",
@@ -18,6 +19,9 @@ def get_table(data):
         ]
     ]

-    return get_section_containers_format2(
-        aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ATTRIBUTES_NAME"
+    return get_section_containers_3_levels(
+        aux,
+        "REQUIREMENTS_ATTRIBUTES_SECTION",
+        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
+        "REQUIREMENTS_ATTRIBUTES_NAME",
     )
@@ -1,6 +1,6 @@
 import warnings

-from dashboard.common_methods import get_section_containers_kisa_ismsp
+from dashboard.common_methods import get_section_containers_3_levels

 warnings.filterwarnings("ignore")
@@ -8,7 +8,7 @@ warnings.filterwarnings("ignore")
 def get_table(data):
     aux = data[
         [
             "REQUIREMENTS_ID",
             "REQUIREMENTS_ATTRIBUTES_DOMAIN",
             "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN",
             "REQUIREMENTS_ATTRIBUTES_SECTION",
             # "REQUIREMENTS_DESCRIPTION",
@@ -20,6 +20,9 @@ def get_table(data):
         ]
     ].copy()

-    return get_section_containers_kisa_ismsp(
-        aux, "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN", "REQUIREMENTS_ATTRIBUTES_SECTION"
+    return get_section_containers_3_levels(
+        aux,
+        "REQUIREMENTS_ATTRIBUTES_DOMAIN",
+        "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN",
+        "REQUIREMENTS_ATTRIBUTES_SECTION",
     )
@@ -1,6 +1,6 @@
 import warnings

-from dashboard.common_methods import get_section_containers_kisa_ismsp
+from dashboard.common_methods import get_section_containers_3_levels

 warnings.filterwarnings("ignore")
@@ -8,7 +8,7 @@ warnings.filterwarnings("ignore")
 def get_table(data):
     aux = data[
         [
             "REQUIREMENTS_ID",
             "REQUIREMENTS_ATTRIBUTES_DOMAIN",
             "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN",
             "REQUIREMENTS_ATTRIBUTES_SECTION",
             # "REQUIREMENTS_DESCRIPTION",
@@ -20,6 +20,9 @@ def get_table(data):
         ]
     ].copy()

-    return get_section_containers_kisa_ismsp(
-        aux, "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN", "REQUIREMENTS_ATTRIBUTES_SECTION"
+    return get_section_containers_3_levels(
+        aux,
+        "REQUIREMENTS_ATTRIBUTES_DOMAIN",
+        "REQUIREMENTS_ATTRIBUTES_SUBDOMAIN",
+        "REQUIREMENTS_ATTRIBUTES_SECTION",
     )
@@ -0,0 +1,24 @@
+import warnings
+
+from dashboard.common_methods import get_section_containers_cis
+
+warnings.filterwarnings("ignore")
+
+
+def get_table(data):
+    aux = data[
+        [
+            "REQUIREMENTS_ID",
+            "REQUIREMENTS_DESCRIPTION",
+            "REQUIREMENTS_ATTRIBUTES_SECTION",
+            "CHECKID",
+            "STATUS",
+            "REGION",
+            "ACCOUNTID",
+            "RESOURCEID",
+        ]
+    ].copy()
+
+    return get_section_containers_cis(
+        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
+    )
@@ -0,0 +1,24 @@
+import warnings
+
+from dashboard.common_methods import get_section_containers_cis
+
+warnings.filterwarnings("ignore")
+
+
+def get_table(data):
+    aux = data[
+        [
+            "REQUIREMENTS_ID",
+            "REQUIREMENTS_DESCRIPTION",
+            "REQUIREMENTS_ATTRIBUTES_SECTION",
+            "CHECKID",
+            "STATUS",
+            "REGION",
+            "ACCOUNTID",
+            "RESOURCEID",
+        ]
+    ].copy()
+
+    return get_section_containers_cis(
+        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
+    )
@@ -0,0 +1,24 @@
+import warnings
+
+from dashboard.common_methods import get_section_containers_cis
+
+warnings.filterwarnings("ignore")
+
+
+def get_table(data):
+    aux = data[
+        [
+            "REQUIREMENTS_ID",
+            "REQUIREMENTS_DESCRIPTION",
+            "REQUIREMENTS_ATTRIBUTES_SECTION",
+            "CHECKID",
+            "STATUS",
+            "REGION",
+            "ACCOUNTID",
+            "RESOURCEID",
+        ]
+    ].copy()
+
+    return get_section_containers_cis(
+        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
+    )
@@ -398,6 +398,10 @@ def display_data(
                 f"dashboard.compliance.{current}"
             )
             data.drop_duplicates(keep="first", inplace=True)
+
+            if "threatscore" in analytics_input:
+                data = get_threatscore_mean_by_pillar(data)
+
             table = compliance_module.get_table(data)
         except ModuleNotFoundError:
             table = html.Div(
@@ -430,6 +434,9 @@ def display_data(
     if "pci" in analytics_input:
         pie_2 = get_bar_graph(df, "REQUIREMENTS_ID")
         current_filter = "req_id"
+    elif "threatscore" in analytics_input:
+        pie_2 = get_table_prowler_threatscore(df)
+        current_filter = "threatscore"
     elif (
         "REQUIREMENTS_ATTRIBUTES_SECTION" in df.columns
        and not df["REQUIREMENTS_ATTRIBUTES_SECTION"].isnull().values.any()
@@ -488,6 +495,13 @@ def display_data(
             pie_2, f"Top 5 failed {current_filter} by requirements"
         )

+    if "threatscore" in analytics_input:
+        security_level_graph = get_graph(
+            pie_2,
+            "Pillar Score by requirements (1 = Lowest Risk, 5 = Highest Risk)",
+            margin_top=0,
+        )
+
     return (
         table_output,
         overall_status_result_graph,
@@ -501,7 +515,7 @@ def display_data(
     )


-def get_graph(pie, title):
+def get_graph(pie, title, margin_top=7):
     return [
         html.Span(
             title,
@@ -514,7 +528,7 @@ def get_graph(pie, title, margin_top=7):
                 "display": "flex",
                 "justify-content": "center",
                 "align-items": "center",
-                "margin-top": "7%",
+                "margin-top": f"{margin_top}%",
             },
         ),
     ]
@@ -618,3 +632,87 @@ def get_table(current_compliance, table):
             className="relative flex flex-col bg-white shadow-provider rounded-xl px-4 py-3 flex-wrap w-full",
         ),
     ]
+
+
+def get_threatscore_mean_by_pillar(df):
+    modified_df = df[df["STATUS"] == "FAIL"]
+
+    modified_df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"] = pd.to_numeric(
+        modified_df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
+    )
+
+    pillar_means = (
+        modified_df.groupby("REQUIREMENTS_ATTRIBUTES_SECTION")[
+            "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"
+        ]
+        .mean()
+        .round(2)
+    )
+
+    output = []
+    for pillar, mean in pillar_means.items():
+        output.append(f"{pillar} - [{mean}]")
+
+    for value in output:
+        if value.split(" - ")[0] in df["REQUIREMENTS_ATTRIBUTES_SECTION"].values:
+            df.loc[
+                df["REQUIREMENTS_ATTRIBUTES_SECTION"] == value.split(" - ")[0],
+                "REQUIREMENTS_ATTRIBUTES_SECTION",
+            ] = value
+    return df
+
+
+def get_table_prowler_threatscore(df):
+    df = df[df["STATUS"] == "FAIL"]
+
+    # Strip the " - [score]" suffix from the REQUIREMENTS_ATTRIBUTES_SECTION column
+    df["REQUIREMENTS_ATTRIBUTES_SECTION"] = (
+        df["REQUIREMENTS_ATTRIBUTES_SECTION"].str.split(" - ").str[0]
+    )
+
+    df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"] = pd.to_numeric(
+        df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
+    )
+
+    score_df = (
+        df.groupby("REQUIREMENTS_ATTRIBUTES_SECTION")[
+            "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"
+        ]
+        .mean()
+        .reset_index()
+        .rename(
+            columns={
+                "REQUIREMENTS_ATTRIBUTES_SECTION": "Pillar",
+                "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK": "Score",
+            }
+        )
+    )
+
+    fig = px.bar(
+        score_df,
+        x="Pillar",
+        y="Score",
+        color="Score",
+        color_continuous_scale=[
+            "#45cc6e",
+            "#f4d44d",
+            "#e77676",
+        ],  # green → yellow → red
+        hover_data={"Score": True, "Pillar": True},
+        labels={"Score": "Average Risk Score", "Pillar": "Section"},
+        height=400,
+    )
+
+    fig.update_layout(
+        xaxis_title="Pillar",
+        yaxis_title="Level of Risk",
+        margin=dict(l=20, r=20, t=30, b=20),
+        plot_bgcolor="rgba(0,0,0,0)",
+        paper_bgcolor="rgba(0,0,0,0)",
+        coloraxis_colorbar=dict(title="Risk"),
+    )
+
+    return dcc.Graph(
+        figure=fig,
+        style={"height": "25rem", "width": "40rem"},
+    )
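To make the intent of `get_threatscore_mean_by_pillar` concrete, here is a minimal sketch against toy data (column names taken from the diff above; the frame and its values are invented, and the helper is assumed to be importable):

```python
import pandas as pd

# Toy findings frame with the columns the helper expects (values made up).
df = pd.DataFrame(
    {
        "STATUS": ["FAIL", "FAIL", "PASS", "FAIL"],
        "REQUIREMENTS_ATTRIBUTES_SECTION": ["IAM", "IAM", "IAM", "Logging"],
        "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK": [4, 2, 5, 3],
    }
)

out = get_threatscore_mean_by_pillar(df)
# Failed IAM findings average (4 + 2) / 2 = 3.0, so the section is relabeled
# "IAM - [3.0]"; Logging's single failed finding yields "Logging - [3.0]".
print(out["REQUIREMENTS_ATTRIBUTES_SECTION"].unique())
```
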
@@ -134,6 +134,8 @@ Prowler for M365 currently supports the following authentication types:

 ### Service Principal authentication

+Authentication flag: `--sp-env-auth`
+
 To allow Prowler to assume the service principal identity for the scan, the following environment variables need to be configured:

 ```console
@@ -146,6 +148,9 @@ If you try to execute Prowler with the `--sp-env-auth` flag and those variables
 Follow the instructions in the [Create Prowler Service Principal](../tutorials/azure/create-prowler-service-principal.md) section to create a service principal.

 ### Service Principal and User Credentials authentication (recommended)

+Authentication flag: `--env-auth`
+
 This authentication method follows the same approach as the service principal method but introduces two additional environment variables for user credentials: `M365_USER` and `M365_ENCRYPTED_PASSWORD`.

 ```console
@@ -176,4 +181,6 @@ Write-Output $encryptedPassword

 ### Interactive Browser authentication

-To use `--browser-auth` the user needs to authenticate against Azure using the default browser to start the scan; the `--tenant-id` flag is also required.
+Authentication flag: `--browser-auth`
+
+This authentication method requires the user to authenticate against Azure using the default browser to start the scan; the `--tenant-id` flag is also required.
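For reference, a minimal sketch of exporting the two user-credential variables named above (placeholder values only; the service principal variables from the previous section are still required):

```console
export M365_USER="user@example.com"
export M365_ENCRYPTED_PASSWORD="<output of the PowerShell encryption snippet>"
```
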
@@ -570,6 +570,10 @@ kubectl logs prowler-XXXXX --namespace prowler-ns
 With M365 you need to specify which auth method is going to be used:

 ```console

 # To use both service principal (for MSGraph) and user credentials (for PowerShell modules)
 prowler m365 --env-auth

 # To use service principal authentication
 prowler m365 --sp-env-auth
@@ -13,7 +13,6 @@ spec:
       containers:
         - name: prowler
           image: toniblyx/prowler:stable
-          command: ["prowler"]
           args: ["kubernetes", "-z"]
           imagePullPolicy: Always
           volumeMounts:
Generated file diff suppressed because it is too large (+964, -890).
+35
-1
@@ -8,14 +8,48 @@ All notable changes to the **Prowler SDK** are documented in this file.

 - Add SOC2 compliance framework to Azure [(#7489)](https://github.com/prowler-cloud/prowler/pull/7489).
 - Add check for unused Service Accounts in GCP [(#7419)](https://github.com/prowler-cloud/prowler/pull/7419).
 - Add Powershell to Microsoft365 [(#7331)](https://github.com/prowler-cloud/prowler/pull/7331).
 - Add service Defender to Microsoft365 with one check for Common Attachments filter enabled in Malware Policies [(#7425)](https://github.com/prowler-cloud/prowler/pull/7425).
 - Add check for Outbound Antispam Policy well configured in service Defender for M365 [(#7480)](https://github.com/prowler-cloud/prowler/pull/7480).
 - Add check for Antiphishing Policy well configured in service Defender in M365 [(#7453)](https://github.com/prowler-cloud/prowler/pull/7453).
 - Add check for Notifications for Internal users enabled in Malware Policies from service Defender in M365 [(#7435)](https://github.com/prowler-cloud/prowler/pull/7435).
 - Support CLOUDSDK_AUTH_ACCESS_TOKEN in GCP [(#7495)](https://github.com/prowler-cloud/prowler/pull/7495).
 - Add service Exchange to Microsoft365 with one check for Organizations Mailbox Auditing enabled [(#7408)](https://github.com/prowler-cloud/prowler/pull/7408)
 - Add check for Bypass Disable in every Mailbox for service Defender in M365 [(#7418)](https://github.com/prowler-cloud/prowler/pull/7418)
 - Add new check `teams_external_domains_restricted` [(#7557)](https://github.com/prowler-cloud/prowler/pull/7557)
 - Add new check `teams_email_sending_to_channel_disabled` [(#7533)](https://github.com/prowler-cloud/prowler/pull/7533)
 - Add new check for External Mails Tagged for service Exchange in M365 [(#7580)](https://github.com/prowler-cloud/prowler/pull/7580)
 - Add new check for WhiteList not used in Transport Rules for service Defender in M365 [(#7569)](https://github.com/prowler-cloud/prowler/pull/7569)
 - Add check for Inbound Antispam Policy with no allowed domains from service Defender in M365 [(#7500)](https://github.com/prowler-cloud/prowler/pull/7500)
 - Add new check `teams_meeting_anonymous_user_join_disabled` [(#7565)](https://github.com/prowler-cloud/prowler/pull/7565)
 - Add new check `teams_unmanaged_communication_disabled` [(#7561)](https://github.com/prowler-cloud/prowler/pull/7561)
 - Add new check `teams_external_users_cannot_start_conversations` [(#7562)](https://github.com/prowler-cloud/prowler/pull/7562)
 - Add new check for AllowList not used in the Connection Filter Policy from service Defender in M365 [(#7492)](https://github.com/prowler-cloud/prowler/pull/7492)
 - Add new check for SafeList not enabled in the Connection Filter Policy from service Defender in M365 [(#7492)](https://github.com/prowler-cloud/prowler/pull/7492)
 - Add new check for DKIM enabled for service Defender in M365 [(#7485)](https://github.com/prowler-cloud/prowler/pull/7485)
 - Add new check `teams_meeting_anonymous_user_start_disabled` [(#7567)](https://github.com/prowler-cloud/prowler/pull/7567)
 - Add new check `teams_meeting_external_lobby_bypass_disabled` [(#7568)](https://github.com/prowler-cloud/prowler/pull/7568)
 - Add new check `teams_meeting_dial_in_lobby_bypass_disabled` [(#7571)](https://github.com/prowler-cloud/prowler/pull/7571)
 - Add new check `teams_meeting_external_control_disabled` [(#7604)](https://github.com/prowler-cloud/prowler/pull/7604)
 - Add new check `teams_meeting_external_chat_disabled` [(#7605)](https://github.com/prowler-cloud/prowler/pull/7605)
 - Add new check `teams_meeting_recording_disabled` [(#7607)](https://github.com/prowler-cloud/prowler/pull/7607)
 - Add new check `teams_meeting_presenters_restricted` [(#7613)](https://github.com/prowler-cloud/prowler/pull/7613)
 - Add new check `teams_meeting_chat_anonymous_users_disabled` [(#7579)](https://github.com/prowler-cloud/prowler/pull/7579)
 - Add Prowler Threat Score Compliance Framework [(#7603)](https://github.com/prowler-cloud/prowler/pull/7603)
 - Add new check `sharepoint_onedrive_sync_restricted_unmanaged_devices` [(#7589)](https://github.com/prowler-cloud/prowler/pull/7589)
 - Add new check for Additional Storage restricted for Exchange in M365 [(#7638)](https://github.com/prowler-cloud/prowler/pull/7638)

 ### Fixed

 - Fix package name location in pyproject.toml while replicating for prowler-cloud [(#7531)](https://github.com/prowler-cloud/prowler/pull/7531).
 - Remove cache in PyPI release action [(#7532)](https://github.com/prowler-cloud/prowler/pull/7532).
 - Add the correct values for logger.info inside iam service [(#7526)](https://github.com/prowler-cloud/prowler/pull/7526).
 - Update S3 bucket naming validation to accept dots [(#7545)](https://github.com/prowler-cloud/prowler/pull/7545).
 - Handle new FlowLog model properties in Azure [(#7546)](https://github.com/prowler-cloud/prowler/pull/7546).
 - Improve compliance and dashboard [(#7596)](https://github.com/prowler-cloud/prowler/pull/7596)
 - Remove invalid parameter `create_file_descriptor` [(#7600)](https://github.com/prowler-cloud/prowler/pull/7600)
 - Remove first empty line in HTML output [(#7606)](https://github.com/prowler-cloud/prowler/pull/7606)
 - Ensure that ContentType in upload_file matches the uploaded file’s format [(#7635)](https://github.com/prowler-cloud/prowler/pull/7635)

 ---
+45
-1
@@ -70,6 +70,15 @@ from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_azure import (
     AzureMitreAttack,
 )
 from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_gcp import GCPMitreAttack
+from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_aws import (
+    ProwlerThreatScoreAWS,
+)
+from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_azure import (
+    ProwlerThreatScoreAzure,
+)
+from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_gcp import (
+    ProwlerThreatScoreGCP,
+)
 from prowler.lib.outputs.csv.csv import CSV
 from prowler.lib.outputs.finding import Finding
 from prowler.lib.outputs.html.html import HTML
@@ -478,6 +487,18 @@ def prowler():
                 )
                 generated_outputs["compliance"].append(kisa_ismsp)
                 kisa_ismsp.batch_write_data_to_file()
+            elif compliance_name == "prowler_threatscore_aws":
+                filename = (
+                    f"{output_options.output_directory}/compliance/"
+                    f"{output_options.output_filename}_{compliance_name}.csv"
+                )
+                prowler_threatscore = ProwlerThreatScoreAWS(
+                    findings=finding_outputs,
+                    compliance=bulk_compliance_frameworks[compliance_name],
+                    file_path=filename,
+                )
+                generated_outputs["compliance"].append(prowler_threatscore)
+                prowler_threatscore.batch_write_data_to_file()
             else:
                 filename = (
                     f"{output_options.output_directory}/compliance/"
@@ -545,6 +566,18 @@ def prowler():
                 )
                 generated_outputs["compliance"].append(iso27001)
                 iso27001.batch_write_data_to_file()
+            elif compliance_name == "prowler_threatscore_azure":
+                filename = (
+                    f"{output_options.output_directory}/compliance/"
+                    f"{output_options.output_filename}_{compliance_name}.csv"
+                )
+                prowler_threatscore = ProwlerThreatScoreAzure(
+                    findings=finding_outputs,
+                    compliance=bulk_compliance_frameworks[compliance_name],
+                    file_path=filename,
+                )
+                generated_outputs["compliance"].append(prowler_threatscore)
+                prowler_threatscore.batch_write_data_to_file()
             else:
                 filename = (
                     f"{output_options.output_directory}/compliance/"
@@ -612,6 +645,18 @@ def prowler():
                 )
                 generated_outputs["compliance"].append(iso27001)
                 iso27001.batch_write_data_to_file()
+            elif compliance_name == "prowler_threatscore_gcp":
+                filename = (
+                    f"{output_options.output_directory}/compliance/"
+                    f"{output_options.output_filename}_{compliance_name}.csv"
+                )
+                prowler_threatscore = ProwlerThreatScoreGCP(
+                    findings=finding_outputs,
+                    compliance=bulk_compliance_frameworks[compliance_name],
+                    file_path=filename,
+                )
+                generated_outputs["compliance"].append(prowler_threatscore)
+                prowler_threatscore.batch_write_data_to_file()
             else:
                 filename = (
                     f"{output_options.output_directory}/compliance/"
@@ -717,7 +762,6 @@ def prowler():
             generic_compliance = GenericCompliance(
                 findings=finding_outputs,
                 compliance=bulk_compliance_frameworks[compliance_name],
-                create_file_descriptor=True,
                 file_path=filename,
             )
             generated_outputs["compliance"].append(generic_compliance)
@@ -12,7 +12,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.1. Management System",
+          "Subdomain": "1.1 Management System",
           "Section": "1.1.1 Executive Participation",
           "AuditChecklist": [
             "Is there documentation outlining the responsibilities and roles of executives to ensure their participation in the establishment and operation of the information protection and personal information protection management system?",
@@ -41,7 +41,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.1. Management System",
+          "Subdomain": "1.1 Management System",
           "Section": "1.1.2 Designation of Chief Officers",
           "AuditChecklist": [
             "Has the CEO officially designated a chief officer responsible for overseeing information protection and personal information protection?",
@@ -77,7 +77,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.1. Management System",
+          "Subdomain": "1.1 Management System",
           "Section": "1.1.3 Organization Structure",
           "AuditChecklist": [
             "Has the organization established and operated a working group with expertise to support the work of the CISO and CPO and systematically implement the organization's information protection and personal information protection activities?",
@@ -112,7 +112,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.1. Management System",
+          "Subdomain": "1.1 Management System",
           "Section": "1.1.4 Scope Setting",
           "AuditChecklist": [
             "Has the organization set the scope of the management system to include key assets that may affect core services and personal information processing?",
@@ -145,7 +145,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.1. Management System",
+          "Subdomain": "1.1 Management System",
           "Section": "1.1.5 Policy Establishment",
           "AuditChecklist": [
             "Has the organization established a top-level information protection and personal information protection policy that serves as the foundation for all information protection and personal information protection activities?",
@@ -180,7 +180,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.1. Management System",
+          "Subdomain": "1.1 Management System",
           "Section": "1.1.6 Resource Allocation",
           "AuditChecklist": [
             "Has the organization secured personnel with expertise in the fields of information protection and personal information protection?",
@@ -216,7 +216,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.2. Risk Management",
+          "Subdomain": "1.2 Risk Management",
           "Section": "1.2.1 Identification of Information Assets",
           "AuditChecklist": [
             "Has the organization established classification criteria for information assets and identified all assets within the scope of the information protection and personal information protection management system, maintaining them in a list?",
@@ -249,7 +249,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.2. Risk Management",
+          "Subdomain": "1.2 Risk Management",
           "Section": "1.2.2 Status and Flow Analysis",
           "AuditChecklist": [
             "Has the organization identified and documented the status and workflows of information services across all areas of the management system?",
@@ -279,7 +279,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.2. Risk Management",
+          "Subdomain": "1.2 Risk Management",
           "Section": "1.2.3 Risk Assessment",
           "AuditChecklist": [
             "Has the organization defined methods for identifying and assessing risks that could arise from various aspects, depending on the characteristics of the organization or service?",
@@ -322,7 +322,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.2. Risk Management",
+          "Subdomain": "1.2 Risk Management",
           "Section": "1.2.4 Selection of Protective Measures",
           "AuditChecklist": [
             "Has the organization developed risk treatment strategies (e.g., risk reduction, avoidance, transfer, acceptance) and selected protective measures to address the identified risks?",
@@ -352,7 +352,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.3. Operation of the Management System",
+          "Subdomain": "1.3 Operation of the Management System",
           "Section": "1.3.1 Implementation of Protective Measures",
           "AuditChecklist": [
             "Are the protective measures effectively implemented according to the implementation plan, and are the implementation results reported to management to verify their accuracy and effectiveness?",
@@ -384,7 +384,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.3. Operation of the Management System",
+          "Subdomain": "1.3 Operation of the Management System",
           "Section": "1.3.2 Sharing of Protective Measures",
           "AuditChecklist": [
             "Has the organization clearly identified the departments and personnel responsible for the operation or implementation of the protective measures?",
@@ -409,7 +409,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.3. Operation of the Management System",
+          "Subdomain": "1.3 Operation of the Management System",
           "Section": "1.3.3 Operation Status Management",
           "AuditChecklist": [
             "Are information protection and personal information protection activities that need to be performed periodically or continuously for the operation of the management system documented and managed?",
@@ -439,7 +439,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.4. Inspection and Improvement of the Management System",
+          "Subdomain": "1.4 Inspection and Improvement of the Management System",
           "Section": "1.4.1 Review of Legal Requirements Compliance",
           "AuditChecklist": [
             "Is the organization regularly identifying and maintaining up-to-date legal requirements related to information protection and personal information protection?",
@@ -477,7 +477,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.4. Inspection and Improvement of the Management System",
+          "Subdomain": "1.4 Inspection and Improvement of the Management System",
           "Section": "1.4.2 Management System Audit",
           "AuditChecklist": [
             "Has the organization established a management system audit plan that includes the criteria, scope, frequency, and qualifications for audit personnel to audit the management system's effectiveness in accordance with legal requirements and established policies?",
@@ -508,7 +508,7 @@
       "Attributes": [
         {
           "Domain": "1. Establishment and Operation of the Management System",
-          "Subdomain": "1.4. Management System Inspection and Improvement",
+          "Subdomain": "1.4 Inspection and Improvement of the Management System",
           "Section": "1.4.3 Management System Improvement",
           "AuditChecklist": [
             "Are the root causes of the issues identified during legal compliance reviews and management system inspections analyzed, and are preventive and improvement measures established and implemented?",
@@ -541,7 +541,7 @@
       "Attributes": [
         {
           "Domain": "2. Control Measures Requirements",
-          "Subdomain": "2.1. Policies, Organization, and Asset Management",
+          "Subdomain": "2.1 Policy, Organization, Asset Management",
           "Section": "2.1.1 Policy Maintenance",
           "AuditChecklist": [
             "Has the organization established and implemented a procedure for regularly reviewing the validity of information protection and personal information protection policies and implementation documents?",
@@ -577,7 +577,7 @@
       "Attributes": [
         {
           "Domain": "2. Control Measures Requirements",
-          "Subdomain": "2.1. Policies, Organization, and Asset Management",
+          "Subdomain": "2.1 Policy, Organization, Asset Management",
           "Section": "2.1.2 Organization Maintenance",
           "AuditChecklist": [
             "Are the roles and responsibilities of those responsible for and involved in information protection and personal information protection clearly defined?",
@@ -624,8 +624,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.1. Policy, Organization, Asset Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.1 Policy, Organization, Asset Management",
           "Section": "2.1.3 Management of Information Assets",
           "AuditChecklist": [
             "Are handling procedures (creation, introduction, storage, use, disposal) and protection measures defined and implemented according to the security classification of information assets?",
@@ -657,8 +657,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.2. Personnel Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.2 Human Security",
           "Section": "2.2.1 Designation and Management of Key Personnel",
           "AuditChecklist": [
             "Are the criteria for key duties, such as handling personal information and important information or accessing key systems, clearly defined?",
@@ -693,8 +693,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.2. Personnel Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.2 Human Security",
           "Section": "2.2.2 Separation of Duties",
           "AuditChecklist": [
             "Are criteria for the separation of duties established and applied to prevent potential harm from the misuse or abuse of authority?",
@@ -720,8 +720,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.2. Human Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.2 Human Security",
           "Section": "2.2.3 Security Pledge",
           "AuditChecklist": [
             "When hiring new personnel, is there a signed security and personal information protection agreement that specifies their responsibilities?",
@@ -750,8 +750,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.2. Human Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.2 Human Security",
           "Section": "2.2.4 Awareness and Training",
           "AuditChecklist": [
             "Is an annual training plan approved by management, detailing the timing, duration, target audience, content, and method of information protection and personal information protection training?",
@@ -788,8 +788,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.2. Human Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.2 Human Security",
           "Section": "2.2.5 Management of Resignation and Job Changes",
           "AuditChecklist": [
             "Are personnel changes (e.g., resignation, job changes, department transfers, leave of absence) shared among HR, information protection, personal information protection, and IT system operations departments?",
@@ -820,8 +820,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.2. Human Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.2 Human Security",
           "Section": "2.2.6 Actions in Case of Security Violations",
           "AuditChecklist": [
             "Has the organization established disciplinary measures for employees and relevant external parties in case of violations of information protection and personal information protection responsibilities and obligations under laws, regulations, and internal policies?",
@@ -847,8 +847,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.3. External Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.3 External Security",
           "Section": "2.3.1 Management of External Parties",
           "AuditChecklist": [
             "Has the organization identified the status of outsourcing and the use of external facilities and services within the scope of the management system?",
@@ -878,8 +878,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.3. External Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.3 External Security",
           "Section": "2.3.2 Security in Contracts with External Parties",
           "AuditChecklist": [
             "When selecting external services or outsourcing vendors related to the handling of important information and personal information, does the organization follow procedures to consider the vendors' capabilities in information protection and personal information protection?",
@@ -910,8 +910,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.3. External Party Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.3 External Security",
           "Section": "2.3.3 External Party Security Implementation Management",
           "AuditChecklist": [
             "Are periodic inspections or audits conducted to ensure external parties comply with information protection and personal information protection requirements specified in contracts, agreements, and internal policies?",
@@ -945,8 +945,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.3. External Party Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.3 External Security",
           "Section": "2.3.4 Security for External Party Contract Changes and Expiry",
           "AuditChecklist": [
             "Has the organization established and implemented security measures to ensure the return of information assets, deletion of information system access accounts, and the acquisition of confidentiality agreements in accordance with official procedures when an external party contract expires, a task is completed, or there is a personnel change?",
@@ -977,8 +977,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.4. Physical Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.4 Physical Security",
           "Section": "2.4.1 Designation of Protected Zones",
           "AuditChecklist": [
             "Has the organization established criteria for designating physical protection zones such as controlled areas, restricted areas, and reception areas to protect personal and sensitive information, documents, storage media, key facilities, and systems from physical and environmental threats?",
@@ -1008,8 +1008,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protective Measures Requirements",
-          "Subdomain": "2.4. Physical Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.4 Physical Security",
           "Section": "2.4.2 Access Control",
           "AuditChecklist": [
             "Is access to protected areas controlled so that only authorized personnel are allowed to enter according to access procedures?",
@@ -1040,8 +1040,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protective Measures Requirements",
-          "Subdomain": "2.4. Physical Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.4 Physical Security",
           "Section": "2.4.3 Information System Protection",
           "AuditChecklist": [
             "Are information systems placed in separated locations based on their importance, usage, and characteristics?",
@@ -1068,8 +1068,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protective Measures Requirements",
-          "Subdomain": "2.4. Physical Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.4 Physical Security",
           "Section": "2.4.4 Operation of Protective Facilities",
           "AuditChecklist": [
             "Are necessary facilities established and operational procedures set up based on the importance and characteristics of each protected area to prevent disasters such as fire, flood, and power failure caused by human error or natural disasters?",
@@ -1100,8 +1100,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.4. Physical Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.4 Physical Security",
           "Section": "2.4.5 Operations in Secure Zones",
           "AuditChecklist": [
             "When operations within secure zones, such as the introduction and maintenance of information systems, are required, are formal procedures for application and execution of such operations established and implemented?",
@@ -1127,8 +1127,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.4. Physical Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.4 Physical Security",
           "Section": "2.4.6 Device Control for Inbound and Outbound",
           "AuditChecklist": [
             "Are control procedures established and implemented to prevent security incidents such as information leakage and malware infection when information systems, mobile devices, storage media, etc., are brought into or taken out of secure zones?",
@@ -1157,8 +1157,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.4. Physical Security",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.4 Physical Security",
           "Section": "2.4.7 Work Environment Security",
           "AuditChecklist": [
             "Are protection measures established and implemented for shared facilities and office equipment such as document storage, shared PCs, multifunction printers, file servers, etc.?",
@@ -1215,8 +1215,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.5. Authentication and Access Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.5 Authentication and Access Management",
           "Section": "2.5.1 User Account Management",
           "AuditChecklist": [
             "Has the organization established and implemented formal procedures for registering, changing, and deleting user accounts and access rights to information systems, personal information, and critical information?",
@@ -1248,8 +1248,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.5. Authentication and Access Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.5 Authentication and Access Management",
           "Section": "2.5.2 User Identification",
           "AuditChecklist": [
             "Are unique identifiers assigned to users and personal information handlers in information systems and personal information processing systems, and is the use of easily guessable identifiers restricted?",
@@ -1309,8 +1309,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.5. Authentication and Authorization Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.5 Authentication and Access Management",
           "Section": "2.5.3 User Authentication",
           "AuditChecklist": [
             "Is access to information systems and personal information processing systems controlled through secure user authentication procedures, login attempt limitations, and warnings for illegal login attempts?",
@@ -1354,8 +1354,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.5. Authentication and Authorization Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.5 Authentication and Access Management",
           "Section": "2.5.4 Password Management",
           "AuditChecklist": [
             "Are procedures for managing and creating secure user passwords for information systems established and implemented?",
@@ -1397,8 +1397,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measures Requirements",
-          "Subdomain": "2.5. Authentication and Privilege Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.5 Authentication and Access Management",
           "Section": "2.5.5 Management of Special Accounts and Privileges",
           "AuditChecklist": [
             "Is there a formal privilege request and approval process established and implemented to ensure that special privileges, such as administrative privileges, are only granted to a minimal number of people?",
@@ -1445,8 +1445,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measures Requirements",
-          "Subdomain": "2.5. Authentication and Privilege Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.5 Authentication and Access Management",
           "Section": "2.5.6 Review of Access Rights",
           "AuditChecklist": [
             "Are the histories of account and access right creation, registration, granting, use, modification, and deletion for information systems, personal information, and important information being recorded?",
@@ -1590,8 +1590,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Control Measures",
-          "Subdomain": "2.6. Access Control",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.6 Access Control",
           "Section": "2.6.1 Network Access",
           "AuditChecklist": [
             "Has the organization identified all access paths to its network and ensured that internal networks are controlled so that only authorized users can access them according to the access control policy?",
@@ -1651,8 +1651,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.6. Access Control",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.6 Access Control",
           "Section": "2.6.2 Access to Information Systems",
           "AuditChecklist": [
             "Have users, access locations, and access means allowed to access operating systems (OS) of information systems such as servers, network systems, and security systems been defined and controlled?",
@@ -1687,8 +1687,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Requirements",
-          "Subdomain": "2.6. Access Control",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.6 Access Control",
           "Section": "2.6.3 Access to Applications",
           "AuditChecklist": [
             "Are access rights to applications granted differentially based on the user's tasks to control access to sensitive information?",
@@ -1769,8 +1769,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measures Requirements",
-          "Subdomain": "2.6. Access Control",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.6 Access Control",
           "Section": "2.6.4 Database Access",
           "AuditChecklist": [
             "Are you identifying the information stored and managed in the database, such as the table list?",
@@ -1804,8 +1804,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measures Requirements",
-          "Subdomain": "2.6. Access Control",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.6 Access Control",
           "Section": "2.6.5 Wireless Network Access",
           "AuditChecklist": [
             "When using a wireless network for business purposes, are you establishing and implementing protection measures such as authentication and encryption of transmitted and received data to ensure the security of the wireless AP and network segment?",
@@ -1864,8 +1864,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protective Measures Requirements",
-          "Subdomain": "2.6. Access Control",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.6 Access Control",
           "Section": "2.6.6 Remote Access Control",
           "AuditChecklist": [
             "Is remote operation of information systems through external networks such as the internet prohibited in principle, and are compensatory measures in place if allowed for unavoidable reasons such as incident response?",
@@ -1922,8 +1922,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protective Measures Requirements",
-          "Subdomain": "2.6. Access Control",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.6 Access Control",
           "Section": "2.6.7 Internet Access Control",
           "AuditChecklist": [
             "Is there an established and implemented policy to control internet access for work PCs used for key duties and personal information handling terminals?",
@@ -2025,8 +2025,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measures Requirements",
-          "Subdomain": "2.7. Application of Encryption",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.7 Application of Encryption",
           "Section": "2.7.1 Application of Encryption Policy",
           "AuditChecklist": [
             "Has an encryption policy been established that includes encryption targets, encryption strength, and encryption usage in consideration of legal requirements for the protection of personal and important information?",
@@ -2069,8 +2069,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Security Control Requirements",
-          "Subdomain": "2.7. Application of Encryption",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.7 Application of Encryption",
           "Section": "2.7.2 Cryptographic Key Management",
           "AuditChecklist": [
             "Are procedures for the generation, use, storage, distribution, modification, recovery, and destruction of cryptographic keys established and implemented?",
@@ -2116,8 +2116,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Security Control Requirements",
-          "Subdomain": "2.8. Security for Information System Introduction and Development",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.8 Security in Information System Introduction and Development",
           "Section": "2.8.1 Definition of Security Requirements",
           "AuditChecklist": [
             "When introducing, developing, or modifying an information system, are procedures for reviewing the validity of information protection and personal information protection aspects and for acquisition established and implemented?",
@@ -2167,8 +2167,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Security Control Requirements",
-          "Subdomain": "2.8. Security for Information System Introduction and Development",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.8 Security in Information System Introduction and Development",
           "Section": "2.8.2 Review and Testing of Security Requirements",
           "AuditChecklist": [
             "When introducing, developing, or modifying an information system, are tests conducted to verify whether the security requirements defined during the analysis and design stages have been effectively applied?",
@@ -2208,8 +2208,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Security Requirements for Protection Measures",
-          "Subdomain": "2.8. Security for Information System Introduction and Development",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.8 Security in Information System Introduction and Development",
           "Section": "2.8.3 Separation of Test and Production Environments",
           "AuditChecklist": [
             "Are development and test systems separated from the production system?",
@@ -2237,8 +2237,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.8. Security in Information System Introduction and Development",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.8 Security in Information System Introduction and Development",
           "Section": "2.8.4 Test Data Security",
           "AuditChecklist": [
             "Is the use of actual operational data restricted during the development and testing of information systems?",
@@ -2269,8 +2269,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.8. Security in Information System Introduction and Development",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.8 Security in Information System Introduction and Development",
           "Section": "2.8.5 Source Program Management",
           "AuditChecklist": [
             "Have procedures been established and implemented to control access to source programs by unauthorized persons?",
@@ -2297,8 +2297,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.8. Security in Information System Introduction and Development",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.8 Security in Information System Introduction and Development",
           "Section": "2.8.6 Transition to Operational Environment",
           "AuditChecklist": [
             "Have control procedures been established and implemented to safely transition newly introduced, developed, or modified systems to the operational environment?",
@@ -2341,8 +2341,8 @@
       ],
      "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.9. System and Service Operations Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.9 System and Service Operation Management",
           "Section": "2.9.1 Change Management",
           "AuditChecklist": [
             "Have procedures been established and implemented for changes to assets related to information systems (hardware, operating systems, commercial software packages, etc.)?",
@@ -2409,8 +2409,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.9. System and Service Operations Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.9 System and Service Operation Management",
           "Section": "2.9.2 Performance and Fault Management",
           "AuditChecklist": [
             "Have procedures been established and implemented to continuously monitor performance and capacity to ensure the availability of information systems?",
@@ -2480,8 +2480,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.9. System and Service Operation Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.9 System and Service Operation Management",
           "Section": "2.9.3 Backup and Recovery Management",
           "AuditChecklist": [
             "Have backup and recovery procedures been established and implemented, including targets, frequency, methods, and procedures?",
@@ -2595,8 +2595,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Measure Requirements",
-          "Subdomain": "2.9. System and Service Operation Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.9 System and Service Operation Management",
           "Section": "2.9.4 Log and Access Record Management",
           "AuditChecklist": [
             "Has the organization established log management procedures for information systems such as servers, applications, security systems, and network systems, and is it generating and storing the necessary logs accordingly?",
@@ -2658,8 +2658,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protection Control Requirements",
-          "Subdomain": "2.9. System and Service Operation Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.9 System and Service Operation Management",
           "Section": "2.9.5 Log and Access Record Inspection",
           "AuditChecklist": [
             "Are there established log review and monitoring procedures, including the frequency, targets, and methods for detecting errors, misuse (unauthorized access, excessive queries, etc.), fraud, and other anomalies in the information system?",
@@ -2693,8 +2693,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Control Requirements",
-          "Subdomain": "2.9. System and Service Operation Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.9 System and Service Operation Management",
           "Section": "2.9.6 Time Synchronization",
           "AuditChecklist": [
             "Is the system time synchronized with the standard time?",
@@ -2719,8 +2719,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protection Control Requirements",
-          "Subdomain": "2.9. System and Service Operation Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.9 System and Service Operation Management",
           "Section": "2.9.7 Reuse and Disposal of Information Assets",
           "AuditChecklist": [
             "Are secure reuse and disposal procedures for information assets established and implemented?",
@@ -2831,8 +2831,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Security Control Requirements",
-          "Subdomain": "2.10. System and Service Security Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.10 System and Service Security Management",
           "Section": "2.10.1 Security System Operation",
           "AuditChecklist": [
             "Has the organization established and implemented operational procedures for the security systems in use?",
@@ -2872,8 +2872,8 @@
       "Checks": [],
       "Attributes": [
         {
-          "Domain": "2. Protective Measure Requirements",
-          "Subdomain": "2.10. System and Service Security Management",
+          "Domain": "2. Control Measures Requirements",
+          "Subdomain": "2.10 System and Service Security Management",
           "Section": "2.10.2 Cloud Security",
           "AuditChecklist": [
             "Is the responsibility and role for information protection and personal information protection clearly defined with the cloud service provider, and is it reflected in contracts (such as SLA)?",
@@ -2984,8 +2984,8 @@
       ],
       "Attributes": [
         {
-          "Domain": "2. Protective Measure Requirements",
-          "Subdomain": "2.10. System and Service Security Management",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.10 System and Service Security Management",
|
||||
"Section": "2.10.3 Public Server Security",
|
||||
"AuditChecklist": [
|
||||
"Are protective measures established and implemented for the operation of public servers?",
|
||||
@@ -3014,8 +3014,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protection Measure Requirements",
|
||||
"Subdomain": "2.10. System and Service Security Management",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.10 System and Service Security Management",
|
||||
"Section": "2.10.4 Security for Electronic Transactions and FinTech",
|
||||
"AuditChecklist": [
|
||||
"Are protection measures established and implemented to ensure the safety and reliability of transactions when providing electronic transaction and FinTech services?",
|
||||
@@ -3059,8 +3059,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protection Measure Requirements",
|
||||
"Subdomain": "2.10. System and Service Security Management",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.10 System and Service Security Management",
|
||||
"Section": "2.10.5 Secure Information Transmission",
|
||||
"AuditChecklist": [
|
||||
"Has a secure transmission policy been established when transmitting personal and critical information to external organizations?",
|
||||
@@ -3093,8 +3093,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protection Measure Requirements",
|
||||
"Subdomain": "2.10. System and Service Security Management",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.10 System and Service Security Management",
|
||||
"Section": "2.10.6 Security for Business Devices",
|
||||
"AuditChecklist": [
|
||||
"Are security control policies, such as device authentication, approval, access scope, and security settings, established and implemented for devices used for business purposes, such as PCs, laptops, virtual PCs, and tablets?",
|
||||
@@ -3129,8 +3129,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Security Control Requirements",
|
||||
"Subdomain": "2.10. System and Service Security Management",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.10 System and Service Security Management",
|
||||
"Section": "2.10.7 Management of Removable Media",
|
||||
"AuditChecklist": [
|
||||
"Are policies and procedures established and implemented for handling (use), storage, disposal, and reuse of removable media such as external hard drives, USB memory, and CDs?",
|
||||
@@ -3179,8 +3179,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Security Control Requirements",
|
||||
"Subdomain": "2.10. System and Service Security Management",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.10 System and Service Security Management",
|
||||
"Section": "2.10.8 Patch Management",
|
||||
"AuditChecklist": [
|
||||
"Are patch management policies and procedures for operating systems (OS) and software established and implemented according to the characteristics and importance of each asset, such as servers, network systems, security systems, and PCs?",
|
||||
@@ -3213,8 +3213,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Security Control Requirements",
|
||||
"Subdomain": "2.10. System and Service Security Management",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.10 System and Service Security Management",
|
||||
"Section": "2.10.9 Malware Control",
|
||||
"AuditChecklist": [
|
||||
"Are protection measures established and implemented to protect information systems and business terminals from malware such as viruses, worms, Trojans, and ransomware?",
|
||||
@@ -3248,8 +3248,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protective Measures Requirements",
|
||||
"Subdomain": "2.11. Incident Prevention and Response",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.11 Incident Prevention and Response",
|
||||
"Section": "2.11.1 Establishment of Incident Prevention and Response System",
|
||||
"AuditChecklist": [
|
||||
"Has the organization established procedures and systems to prevent security breaches and personal information leaks and to respond quickly and effectively when incidents occur?",
|
||||
@@ -3307,8 +3307,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protective Measures Requirements",
|
||||
"Subdomain": "2.11. Incident Prevention and Response",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.11 Incident Prevention and Response",
|
||||
"Section": "2.11.2 Vulnerability Inspection and Remediation",
|
||||
"AuditChecklist": [
|
||||
"Has the organization established and implemented procedures for conducting regular vulnerability inspections of information systems?",
|
||||
@@ -3371,8 +3371,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protection Measures Requirements",
|
||||
"Subdomain": "2.11. Incident Prevention and Response",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.11 Incident Prevention and Response",
|
||||
"Section": "2.11.3 Abnormal Behavior Analysis and Monitoring",
|
||||
"AuditChecklist": [
|
||||
"Is the organization collecting, analyzing, and monitoring network traffic, data flows, and event logs from major information systems, applications, networks, and security systems to detect abnormal behaviors such as intrusion attempts, personal information leakage attempts, or fraudulent activities?",
|
||||
@@ -3403,8 +3403,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protection Measures Requirements",
|
||||
"Subdomain": "2.11. Incident Prevention and Response",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.11 Incident Prevention and Response",
|
||||
"Section": "2.11.4 Incident Response Training and Improvement",
|
||||
"AuditChecklist": [
|
||||
"Has the organization established a simulation training plan for responding to security incidents and personal information leakage incidents, and are such training exercises conducted at least once a year?",
|
||||
@@ -3431,8 +3431,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protection Measures Requirements",
|
||||
"Subdomain": "2.11. Incident Prevention and Response",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.11 Incident Prevention and Response",
|
||||
"Section": "2.11.5 Incident Response and Recovery",
|
||||
"AuditChecklist": [
|
||||
"When signs of or actual incidents of security breaches or personal information leakage are detected, is the organization responding and reporting promptly according to the defined incident response procedures?",
|
||||
@@ -3501,8 +3501,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protective Measure Requirements",
|
||||
"Subdomain": "2.12. Disaster Recovery",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.12 Disaster Recovery",
|
||||
"Section": "2.12.1 Safety Measures for Disaster Preparedness",
|
||||
"AuditChecklist": [
|
||||
"Has the organization identified IT disaster types that could threaten the continuity of core services (businesses) and analyzed the expected scale of damage and impact on operations to identify core IT services (businesses) and systems?",
|
||||
@@ -3570,8 +3570,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "2. Protective Measure Requirements",
|
||||
"Subdomain": "2.12. Disaster Recovery",
|
||||
"Domain": "2. Control Measures Requirements",
|
||||
"Subdomain": "2.12 Disaster Recovery",
|
||||
"Section": "2.12.2 Disaster Recovery Testing and Improvement",
|
||||
"AuditChecklist": [
|
||||
"Has the organization established and implemented disaster recovery test plans to evaluate the effectiveness of the established IT disaster recovery system?",
|
||||
@@ -3599,7 +3599,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.1. Protection Measures during Personal Information Collection",
|
||||
"Subdomain": "3.1 Protection Measures for Personal Information Collection",
|
||||
"Section": "3.1.1 Collection and Use of Personal Information",
|
||||
"AuditChecklist": [
|
||||
"When collecting personal information, is it collected in accordance with lawful requirements such as obtaining the data subject’s consent, complying with legal obligations, or concluding and fulfilling contracts?",
|
||||
@@ -3645,7 +3645,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.1. Protection Measures during Personal Information Collection",
|
||||
"Subdomain": "3.1 Protection Measures for Personal Information Collection",
|
||||
"Section": "3.1.2 Restrictions on the Collection of Personal Information",
|
||||
"AuditChecklist": [
|
||||
"When collecting personal information, is only the minimum amount of information necessary for the intended purpose being collected?",
|
||||
@@ -3678,7 +3678,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.1. Protection Measures during Personal Information Collection",
|
||||
"Subdomain": "3.1 Protection Measures for Personal Information Collection",
|
||||
"Section": "3.1.3 Restrictions on the Processing of Resident Registration Numbers",
|
||||
"AuditChecklist": [
|
||||
"Are resident registration numbers only processed when there is a clear legal basis?",
|
||||
@@ -3713,8 +3713,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Personal Information Processing Requirements",
|
||||
"Subdomain": "3.1. Protection Measures for Personal Information Collection",
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.1 Protection Measures for Personal Information Collection",
|
||||
"Section": "3.1.4 Restriction on Processing of Sensitive and Unique Identifying Information",
|
||||
"AuditChecklist": [
|
||||
"Is sensitive information processed only with the separate consent of the data subject or when legally required?",
|
||||
@@ -3744,8 +3744,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Personal Information Processing Requirements",
|
||||
"Subdomain": "3.1. Protection Measures for Personal Information Collection",
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.1 Protection Measures for Personal Information Collection",
|
||||
"Section": "3.1.5 Indirect Collection of Personal Information",
|
||||
"AuditChecklist": [
|
||||
"When receiving personal information from a third party, is it clearly stated in the contract that the responsibility for obtaining consent for the collection of personal information lies with the party providing the information?",
|
||||
@@ -3779,8 +3779,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Personal Information Processing Requirements",
|
||||
"Subdomain": "3.1. Protection Measures for Personal Information Collection",
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.1 Protection Measures for Personal Information Collection",
|
||||
"Section": "3.1.6 Installation and Operation of Video Information Processing Devices",
|
||||
"AuditChecklist": [
|
||||
"When installing and operating fixed video information processing devices in public places, is it reviewed whether the installation meets legal requirements?",
|
||||
@@ -3819,7 +3819,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.1. Protection Measures When Collecting Personal Information",
|
||||
"Subdomain": "3.1 Protection Measures for Personal Information Collection",
|
||||
"Section": "3.1.7 Collection and Use of Personal Information for Marketing Purposes",
|
||||
"AuditChecklist": [
|
||||
"When obtaining consent from data subjects to process personal information for the purpose of promoting or recommending goods or services, is the data subject clearly informed, and is separate consent obtained?",
|
||||
@@ -3861,7 +3861,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.2. Protection Measures When Retaining and Using Personal Information",
|
||||
"Subdomain": "3.2 Protection Measures When Retaining and Using Personal Information",
|
||||
"Section": "3.2.1 Management of Personal Information Status",
|
||||
"AuditChecklist": [
|
||||
"Is the status of collected and retained personal information, including the items, volume, purpose and method of processing, and retention period, regularly managed?",
|
||||
@@ -3904,7 +3904,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.2. Protection Measures for Retention and Use of Personal Information",
|
||||
"Subdomain": "3.2 Protection Measures When Retaining and Using Personal Information",
|
||||
"Section": "3.2.2 Personal Information Quality Assurance",
|
||||
"AuditChecklist": [
|
||||
"Are procedures and methods in place to maintain personal information in an accurate and up-to-date state?",
|
||||
@@ -3932,7 +3932,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.2. Protection Measures for Retention and Use of Personal Information",
|
||||
"Subdomain": "3.2 Protection Measures When Retaining and Using Personal Information",
|
||||
"Section": "3.2.3 Protection of User Device Access",
|
||||
"AuditChecklist": [
|
||||
"When accessing information stored on the user's mobile device or functions installed on the device, are users clearly informed and their consent obtained?",
|
||||
@@ -3963,7 +3963,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.2. Protection Measures for Retention and Use of Personal Information",
|
||||
"Subdomain": "3.2 Protection Measures When Retaining and Using Personal Information",
|
||||
"Section": "3.2.4 Use and Provision of Personal Information Beyond Purpose",
|
||||
"AuditChecklist": [
|
||||
"Is personal information used or provided only within the scope of the purpose consented to by the data subject at the time of collection or as permitted by law?",
|
||||
@@ -4000,7 +4000,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.2. Protection Measures for Retention and Use of Personal Information",
|
||||
"Subdomain": "3.2 Protection Measures When Retaining and Using Personal Information",
|
||||
"Section": "3.2.5 Processing of Pseudonymized Information",
|
||||
"AuditChecklist": [
|
||||
"When processing pseudonymized information, are procedures established for purpose limitation, pseudonymization methods and standards, adequacy review, prohibition of re-identification, and actions in case of re-identification?",
|
||||
@@ -4035,7 +4035,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.3. Protective Measures When Providing Personal Information",
|
||||
"Subdomain": "3.3 Protection Measures When Providing Personal Information",
|
||||
"Section": "3.3.1 Provision of Personal Information to Third Parties",
|
||||
"AuditChecklist": [
|
||||
"When providing personal information to third parties, are legal requirements such as consent from the data subject or compliance with legal obligations clearly identified and followed?",
|
||||
@@ -4074,7 +4074,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.3. Protective Measures When Providing Personal Information",
|
||||
"Subdomain": "3.3 Protection Measures When Providing Personal Information",
|
||||
"Section": "3.3.2 Outsourcing of Personal Information Processing",
|
||||
"AuditChecklist": [
|
||||
"When outsourcing personal information processing tasks (including sub-outsourcing) to third parties, are the details of the outsourced tasks and the trustees regularly updated and disclosed on the website?",
|
||||
@@ -4106,7 +4106,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.3. Protective Measures When Providing Personal Information",
|
||||
"Subdomain": "3.3 Protection Measures When Providing Personal Information",
|
||||
"Section": "3.3.3 Transfer of Personal Information Due to Business Transfers",
|
||||
"AuditChecklist": [
|
||||
"When transferring personal information to another party due to the transfer or merger of all or part of the business, are the necessary matters communicated to the data subjects in advance?",
|
||||
@@ -4137,7 +4137,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.3. Protection Measures When Providing Personal Information",
|
||||
"Subdomain": "3.3 Protection Measures When Providing Personal Information",
|
||||
"Section": "3.3.4 Transfer of Personal Information Abroad",
|
||||
"AuditChecklist": [
|
||||
"When transferring personal information abroad, has the data subject been fully informed of all notification requirements and obtained separate consent, or complied with certification or recognition, as required by law?",
|
||||
@@ -4171,7 +4171,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.4. Protection Measures When Destroying Personal Information",
|
||||
"Subdomain": "3.4 Protection Measures When Destroying Personal Information",
|
||||
"Section": "3.4.1 Destruction of Personal Information",
|
||||
"AuditChecklist": [
|
||||
"Has an internal policy been established regarding the retention period and destruction of personal information?",
|
||||
@@ -4205,7 +4205,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.4. Protection Measures When Destroying Personal Information",
|
||||
"Subdomain": "3.4 Protection Measures When Destroying Personal Information",
|
||||
"Section": "3.4.2 Measures When Retaining Personal Information After Purpose Is Achieved",
|
||||
"AuditChecklist": [
|
||||
"When personal information is retained beyond the retention period or after the processing purpose has been achieved, in accordance with relevant laws, is it limited to the minimum necessary period and only the minimum necessary information?",
|
||||
@@ -4238,7 +4238,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.5. Protection of Data Subject's Rights",
|
||||
"Subdomain": "3.5 Protection of Data Subject's Rights",
|
||||
"Section": "3.5.1 Disclosure of Privacy Policy",
|
||||
"AuditChecklist": [
|
||||
"Is the privacy policy written in clear and easy-to-understand language, covering all the contents required by law?",
|
||||
@@ -4270,7 +4270,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.5. Protection of Data Subject's Rights",
|
||||
"Subdomain": "3.5 Protection of Data Subject's Rights",
|
||||
"Section": "3.5.2 Guaranteeing Data Subject's Rights",
|
||||
"AuditChecklist": [
|
||||
"Are procedures in place to ensure that data subjects or their representatives can exercise their rights (hereinafter referred to as 'Requests for Access, etc.') to access, rectify, delete, or suspend the processing of their personal information in a way that is not more difficult than the process used for collecting it?",
|
||||
@@ -4309,7 +4309,7 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Domain": "3. Requirements for Each Stage of Personal Information Processing",
|
||||
"Subdomain": "3.5. Protection of Data Subject's Rights",
|
||||
"Subdomain": "3.5 Protection of Data Subject's Rights",
|
||||
"Section": "3.5.3 Notification to Data Subjects",
|
||||
"AuditChecklist": [
|
||||
"If the organization is legally obligated to do so, does it periodically notify data subjects of the use and provision of their personal information, or provide them with access to an information system where they can review such details?",
|
||||
|
||||
File diff suppressed because it is too large
@@ -2607,7 +2607,7 @@
],
"Attributes": [
{
-"Section": "6 Networking",
+"Section": "6. Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Network security groups should be periodically evaluated for port misconfigurations. Where certain ports and protocols may be exposed to the Internet, they should be evaluated for necessity and restricted wherever they are not explicitly required.",
@@ -2629,7 +2629,7 @@
],
"Attributes": [
{
-"Section": "6 Networking",
+"Section": "6. Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Network security groups should be periodically evaluated for port misconfigurations. Where certain ports and protocols may be exposed to the Internet, they should be evaluated for necessity and restricted wherever they are not explicitly required.",
@@ -2651,7 +2651,7 @@
],
"Attributes": [
{
-"Section": "6 Networking",
+"Section": "6. Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Network security groups should be periodically evaluated for port misconfigurations. Where certain ports and protocols may be exposed to the Internet, they should be evaluated for necessity and restricted wherever they are not explicitly required.",
@@ -2673,7 +2673,7 @@
],
"Attributes": [
{
-"Section": "6 Networking",
+"Section": "6. Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Network security groups should be periodically evaluated for port misconfigurations. Where certain ports and protocols may be exposed to the Internet, they should be evaluated for necessity and restricted wherever they are not explicitly required and narrowly configured.",
@@ -2695,7 +2695,7 @@
],
"Attributes": [
{
-"Section": "6 Networking",
+"Section": "6. Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days.",
@@ -2717,7 +2717,7 @@
],
"Attributes": [
{
-"Section": "6 Networking",
+"Section": "6. Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable Network Watcher for Azure subscriptions.",
@@ -2737,7 +2737,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "6 Networking",
+"Section": "6. Networking",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Public IP Addresses provide tenant accounts with Internet connectivity for resources contained within the tenant. During the creation of certain resources in Azure, a Public IP Address may be created. All Public IP Addresses within the tenant should be periodically reviewed for accuracy and necessity.",
@@ -2759,7 +2759,7 @@
],
"Attributes": [
{
-"Section": "7 Virtual Machines",
+"Section": "7. Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The Azure Bastion service allows secure remote access to Azure Virtual Machines over the Internet without exposing remote access protocol ports and services directly to the Internet. The Azure Bastion service provides this access using TLS over 443/TCP, and subscribes to hardened configurations within an organization's Azure Active Directory service.",
@@ -2781,7 +2781,7 @@
],
"Attributes": [
{
-"Section": "7 Virtual Machines",
+"Section": "7. Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Migrate blob-based VHDs to Managed Disks on Virtual Machines to exploit the default features of this configuration. The features include: 1. Default Disk Encryption 2. Resilience, as Microsoft will manage the disk storage and move it around if underlying hardware goes faulty 3. Reduction of costs over storage accounts",
@@ -2803,7 +2803,7 @@
],
"Attributes": [
{
-"Section": "7 Virtual Machines",
+"Section": "7. Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Ensure that OS disks (boot volumes) and data disks (non-boot volumes) are encrypted with CMK (Customer Managed Keys). Customer Managed keys can be either ADE or Server Side Encryption (SSE).",
@@ -2825,7 +2825,7 @@
],
"Attributes": [
{
-"Section": "7 Virtual Machines",
+"Section": "7. Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Ensure that unattached disks in a subscription are encrypted with a Customer Managed Key (CMK).",
@@ -2845,7 +2845,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "7 Virtual Machines",
+"Section": "7. Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For added security, only install organization-approved extensions on VMs.",
@@ -2867,7 +2867,7 @@
],
"Attributes": [
{
-"Section": "7 Virtual Machines",
+"Section": "7. Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Install endpoint protection for all virtual machines.",
@@ -2887,7 +2887,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "7 Virtual Machines",
+"Section": "7. Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "NOTE: This is a legacy recommendation. Managed Disks are encrypted by default and recommended for all new VM implementations. VHD (Virtual Hard Disks) are stored in blob storage and are the old-style disks that were attached to Virtual Machines. The blob VHD was then leased to the VM. By default, storage accounts are not encrypted, and Microsoft Defender will then recommend that the OS disks should be encrypted. Storage accounts can be encrypted as a whole using PMK or CMK. This should be turned on for storage accounts containing VHDs.",
@@ -2909,7 +2909,7 @@
],
"Attributes": [
{
-"Section": "8 Key Vault",
+"Section": "8. Key Vault",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that all Keys in Role Based Access Control (RBAC) Azure Key Vaults have an expiration date set.",
@@ -2931,7 +2931,7 @@
],
"Attributes": [
{
-"Section": "8 Key Vault",
+"Section": "8. Key Vault",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that all Keys in Non Role Based Access Control (RBAC) Azure Key Vaults have an expiration date set.",
@@ -2953,7 +2953,7 @@
],
"Attributes": [
{
-"Section": "8 Key Vault",
+"Section": "8. Key Vault",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that all Secrets in Role Based Access Control (RBAC) Azure Key Vaults have an expiration date set.",
@@ -2975,7 +2975,7 @@
],
"Attributes": [
{
-"Section": "8 Key Vault",
+"Section": "8. Key Vault",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that all Secrets in Non Role Based Access Control (RBAC) Azure Key Vaults have an expiration date set.",
@@ -2997,7 +2997,7 @@
],
"Attributes": [
{
-"Section": "8 Key Vault",
+"Section": "8. Key Vault",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The Key Vault contains object keys, secrets, and certificates. Accidental unavailability of a Key Vault can cause immediate data loss or loss of security functions (authentication, validation, verification, non-repudiation, etc.) supported by the Key Vault objects. It is recommended the Key Vault be made recoverable by enabling the 'Do Not Purge' and 'Soft Delete' functions. This is in order to prevent loss of encrypted data, including storage accounts, SQL databases, and/or dependent services provided by Key Vault objects (Keys, Secrets, Certificates) etc. This may happen in the case of accidental deletion by a user or from disruptive activity by a malicious user. WARNING: A current limitation of the soft-delete feature across all Azure services is role assignments disappearing when Key Vault is deleted. All role assignments will need to be recreated after recovery.",
@@ -3019,7 +3019,7 @@
],
"Attributes": [
{
-"Section": "8 Key Vault",
+"Section": "8. Key Vault",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "WARNING: Role assignments disappear when a Key Vault has been deleted (soft-delete) and recovered. Afterwards it will be required to recreate all role assignments. This is a limitation of the soft-delete feature across all Azure services.",
@@ -3041,7 +3041,7 @@
],
"Attributes": [
{
-"Section": "8 Key Vault",
+"Section": "8. Key Vault",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Private endpoints will secure network traffic from Azure Key Vault to the resources requesting secrets and keys.",
@@ -3063,7 +3063,7 @@
],
"Attributes": [
{
-"Section": "8 Key Vault",
+"Section": "8. Key Vault",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Automatic Key Rotation is available in Public Preview. The currently supported applications are Key Vault, Managed Disks, and Storage accounts accessing keys within Key Vault. The number of supported applications will incrementally increase.",
@@ -3085,7 +3085,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Azure App Service Authentication is a feature that can prevent anonymous HTTP requests from reaching a Web Application or authenticate those with tokens before they reach the app. If an anonymous request is received from a browser, App Service will redirect to a logon page. To handle the logon process, a choice from a set of identity providers can be made, or a custom authentication mechanism can be implemented.",
@@ -3107,7 +3107,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Azure Web Apps allows sites to run under both HTTP and HTTPS by default. Web apps can be accessed by anyone using non-secure HTTP links by default. Non-secure HTTP requests can be restricted and all HTTP requests redirected to the secure HTTPS port. It is recommended to enforce HTTPS-only traffic.",
@@ -3129,7 +3129,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The TLS (Transport Layer Security) protocol secures transmission of data over the internet using standard encryption technology. Encryption should be set with the latest version of TLS. App service allows TLS 1.2 by default, which is the recommended TLS level by industry standards such as PCI DSS.",
@@ -3151,7 +3151,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Client certificates allow for the app to request a certificate for incoming requests. Only clients that have a valid certificate will be able to reach the app.",
@@ -3173,7 +3173,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Managed service identity in App Service provides more security by eliminating secrets from the app, such as credentials in the connection strings. When registering with Azure Active Directory in App Service, the app will connect to other Azure services securely without the need for usernames and passwords.",
@@ -3195,7 +3195,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Periodically newer versions are released for PHP software either due to security flaws or to include additional functionality. Using the latest PHP version for web apps is recommended in order to take advantage of security fixes, if any, and/or additional functionalities of the newer version.",
@@ -3217,7 +3217,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Periodically, newer versions are released for Python software either due to security flaws or to include additional functionality. Using the latest full Python version for web apps is recommended in order to take advantage of security fixes, if any, and/or additional functionalities of the newer version.",
@@ -3239,7 +3239,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Periodically, newer versions are released for Java software either due to security flaws or to include additional functionality. Using the latest Java version for web apps is recommended in order to take advantage of security fixes, if any, and/or new functionalities of the newer version.",
@@ -3261,7 +3261,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Periodically, newer versions are released for HTTP either due to security flaws or to include additional functionality. Using the latest HTTP version for web apps to take advantage of security fixes, if any, and/or new functionalities of the newer version.",
@@ -3283,7 +3283,7 @@
],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "By default, Azure Functions, Web, and API Services can be deployed over FTP. If FTP is required for an essential deployment workflow, FTPS should be required for FTP login for all App Service Apps and Functions.",
@@ -3303,7 +3303,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "9 AppService",
+"Section": "9. AppService",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Azure Key Vault will store multiple types of sensitive information such as encryption keys, certificate thumbprints, and Managed Identity Credentials. Access to these 'Secrets' can be controlled through granular permissions.",
@@ -3323,7 +3323,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "10 Miscellaneous",
+"Section": "10. Miscellaneous",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Resource Manager Locks provide a way for administrators to lock down Azure resources to prevent deletion of, or modifications to, a resource. These locks sit outside of the Role Based Access Controls (RBAC) hierarchy and, when applied, will place restrictions on the resource for all users. These locks are very useful when there is an important resource in a subscription that users should not be able to delete or change. Locks can help prevent accidental and malicious changes or deletion.",
@@ -12,7 +12,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators or appropriately delegated users to create new tenants.",
@@ -32,7 +32,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Microsoft Entra ID is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources as a guest user. Guest users in every subscription should be reviewed on a regular basis to ensure that inactive and unneeded accounts are removed.",
@@ -52,7 +52,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensures that two alternate forms of identification are provided before allowing a password reset.",
@@ -72,7 +72,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Microsoft Azure provides a Global Banned Password policy that applies to Azure administrative and normal user accounts. This is not applied to user accounts that are synced from an on-premise Active Directory unless Microsoft Entra ID Connect is used and you enable EnforceCloudPasswordPolicyForPasswordSyncedUsers. Please see the list in default values on the specifics of this policy. To further password security, it is recommended to further define a custom banned password policy.",
@@ -92,7 +92,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that the number of days before users are asked to re-confirm their authentication information is not set to 0.",
@@ -112,7 +112,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that users are notified on their primary and secondary emails on password resets.",
@@ -132,7 +132,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that all Global Administrators are notified if any other administrator resets their password.",
@@ -154,7 +154,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators to provide consent for applications before use.",
@@ -176,7 +176,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Allow users to provide consent for selected permissions when a request is coming from a verified publisher.",
@@ -196,7 +196,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators to provide consent for the apps before use.",
@@ -218,7 +218,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators or appropriately delegated users to register third-party applications.",
@@ -240,7 +240,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Limit guest user permissions.",
@@ -262,7 +262,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict invitations to users with specific administrative roles only.",
@@ -282,7 +282,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Restrict access to the Microsoft Entra ID administration center to administrators only. **NOTE**: This only affects access to the Entra ID administrator's web portal. This setting does not prohibit privileged users from using other methods such as Rest API or Powershell to obtain sensitive information from Microsoft Entra ID.",
@@ -302,7 +302,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict access to group web interface in the Access Panel portal.",
@@ -324,7 +324,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict security group creation to administrators only.",
@@ -344,7 +344,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict security group management to administrators only.",
@@ -366,7 +366,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict Microsoft 365 group creation to administrators only.",
@@ -386,7 +386,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Joining or registering devices to Microsoft Entra ID should require Multi-factor authentication.",
@@ -408,7 +408,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The principle of least privilege should be followed and only necessary privileges should be assigned instead of allowing full administrative access.",
@@ -430,7 +430,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Resource locking is a powerful protection mechanism that can prevent inadvertent modification/deletion of resources within Azure subscriptions/Resource Groups and is a recommended NIST configuration.",
@@ -450,7 +450,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Users who are set as subscription owners are able to make administrative changes to the subscriptions and move them into and out of Microsoft Entra ID.",
@@ -472,7 +472,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "This recommendation aims to maintain a balance between security and operational efficiency by ensuring that a minimum of 2 and a maximum of 4 users are assigned the Global Administrator role in Microsoft Entra ID. Having at least two Global Administrators ensures redundancy, while limiting the number to four reduces the risk of excessive privileged access.",
@@ -494,7 +494,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -517,7 +517,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -540,7 +540,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
@@ -561,7 +561,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -584,7 +584,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -605,7 +605,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -626,7 +626,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -647,7 +647,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -668,7 +668,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -691,7 +691,7 @@
],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -712,7 +712,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1.Identity and Access Management",
+"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
@@ -3481,7 +3481,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "10",
+"Section": "10. Miscellaneous",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Resource Manager Locks provide a way for administrators to lock down Azure resources to prevent deletion of, or modifications to, a resource. These locks sit outside of the Role Based Access Controls (RBAC) hierarchy and, when applied, will place restrictions on the resource for all users. These locks are very useful when there is an important resource in a subscription that users should not be able to delete or change. Locks can help prevent accidental and malicious changes or deletion.",
File diff suppressed because it is too large
@@ -0,0 +1,977 @@
{
"Framework": "ProwlerThreatScore",
"Version": "1.0",
"Provider": "GCP",
"Description": "Prowler ThreatScore Compliance Framework for GCP ensures that the GCP project is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Forensic Readiness and Encryption",
"Requirements": [
{
"Id": "1.1.1",
"Description": "Ensure User-Managed/External Keys for Service Accounts Are Rotated Every 90 Days or Fewer",
"Checks": [
"iam_sa_user_managed_key_rotate_90_days"
],
"Attributes": [
{
"Title": "User-Managed/External Keys for Service Accounts Are Rotated Every 90 Days or Fewer",
"Section": "1. IAM",
"SubSection": "1.1 Authentication",
"AttributeDescription": "Service account keys consist of a key ID (private_key_id) and a private key, which are used to authenticate programmatic requests to Google Cloud services. It is recommended to regularly rotate service account keys to enhance security and reduce the risk of unauthorized access.",
"AdditionalInformation": "Regularly rotating service account keys minimizes the risk of a compromised, lost, or stolen key being used to access cloud resources. Google-managed keys are automatically rotated daily for internal authentication, ensuring strong security. For user-managed (external) keys, users are responsible for key security, storage, and rotation. Since Google does not retain private keys once generated, proper key management practices must be followed. Google Cloud allows up to 10 external keys per service account, making it easier to rotate them without disruption. Implementing regular key rotation ensures that old keys are not left active, reducing the potential attack surface.",
"LevelOfRisk": 5
}
]
},
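For reference, the intent of the `iam_sa_user_managed_key_rotate_90_days` check can be approximated outside Prowler with the IAM REST API. A minimal sketch, assuming the `google-api-python-client` package, application-default credentials, and a hypothetical `PROJECT_ID`; the real Prowler check is structured differently:

```python
from datetime import datetime, timedelta, timezone

from googleapiclient import discovery  # pip install google-api-python-client

PROJECT_ID = "my-project"  # hypothetical project ID
MAX_KEY_AGE = timedelta(days=90)

iam = discovery.build("iam", "v1")  # uses application-default credentials
accounts = (
    iam.projects()
    .serviceAccounts()
    .list(name=f"projects/{PROJECT_ID}")
    .execute()
    .get("accounts", [])
)

for account in accounts:
    # Only USER_MANAGED keys are the customer's responsibility to rotate.
    keys = (
        iam.projects()
        .serviceAccounts()
        .keys()
        .list(name=account["name"], keyTypes="USER_MANAGED")
        .execute()
        .get("keys", [])
    )
    for key in keys:
        created = datetime.fromisoformat(key["validAfterTime"].replace("Z", "+00:00"))
        if datetime.now(timezone.utc) - created > MAX_KEY_AGE:
            print(f"FAIL: {key['name']} is older than 90 days")
```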
{
"Id": "1.1.2",
"Description": "Ensure API Keys Only Exist for Active Services",
"Checks": [
"apikeys_key_exists"
],
"Attributes": [
{
"Title": "API Keys Only Exist for Active Services",
"Section": "1. IAM",
"SubSection": "1.1 Authentication",
"AttributeDescription": "API keys should only be used when no other authentication method is available, as they pose significant security risks. Unused API keys with active permissions may still exist within a project, potentially exposing resources to unauthorized access. It is recommended to use standard authentication flows such as OAuth 2.0 or service account authentication instead.",
"AdditionalInformation": "API keys are inherently insecure because they: Are simple encrypted strings that can be easily exposed in browsers, client-side applications, or devices. Do not authenticate users or applications making API requests. Can be accidentally leaked in logs, repositories, or web traffic. To enhance security, API keys should be avoided when possible, and unused keys should be deleted to minimize the risk of unauthorized access.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.1.3",
"Description": "Ensure API Keys Are Rotated Every 90 Days",
"Checks": [
"apikeys_key_rotated_in_90_days"
],
"Attributes": [
{
"Title": "API Keys Are Rotated Every 90 Days",
"Section": "1. IAM",
"SubSection": "1.1 Authentication",
"AttributeDescription": "API keys should only be used when no other authentication method is available. If API keys are in use, it is recommended to rotate them every 90 days to minimize security risks.",
"AdditionalInformation": "API keys are inherently insecure because: They are simple encrypted strings that can be easily exposed. They do not authenticate users or applications making API requests. They are often accessible to clients, increasing the risk of theft and misuse. Unlike credentials with expiration policies, stolen API keys remain valid indefinitely unless revoked or regenerated. Regularly rotating API keys reduces the risk of unauthorized access by ensuring that compromised keys cannot be used for extended periods. To enhance security, API keys should be rotated every 90 days or as part of a proactive security policy.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.1",
"Description": "Ensure That There Are Only GCP-Managed Service Account Keys for Each Service Account",
"Checks": [
"iam_sa_no_user_managed_keys"
],
"Attributes": [
{
"Title": "Only GCP-Managed Service Account Keys for Each Service Account",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "Service accounts should not use user-managed keys, as they introduce security risks and require manual management. Instead, use Google Cloud-managed keys, which are automatically rotated and secured by Google.",
"AdditionalInformation": "User-managed keys are downloadable and manually managed, making them vulnerable to leaks, mismanagement, and unauthorized access. In contrast, GCP-managed keys are non-downloadable, automatically rotated weekly, and securely handled by Google Cloud services like App Engine and Compute Engine. Managing user-generated keys requires key storage, distribution, rotation, revocation, and protection, all of which introduce potential security gaps. Common risks include keys being exposed in source code repositories, left in unsecured locations, or unintentionally shared. To minimize security risks, it is recommended to disable user-managed service account keys and rely on GCP-managed keys instead.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.2",
"Description": "Ensure That Service Account Has No Admin Privileges",
"Checks": [
"iam_sa_no_administrative_privileges"
],
"Attributes": [
{
"Title": "SA Has No Admin Privileges",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "A service account is a special Google account assigned to an application or virtual machine (VM) rather than an individual user. It is used to authenticate API requests on behalf of the application. Service accounts should not be granted admin privileges to minimize security risks.",
"AdditionalInformation": "Service accounts control resource access based on their assigned roles. Granting admin privileges to a service account allows full control over applications or VMs, enabling actions like deletion, updates, and configuration changes without user intervention. This increases the risk of misconfigurations, privilege escalation, or potential security breaches. To follow the principle of least privilege, it is recommended to restrict admin access for service accounts and assign only the necessary permissions.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.3",
"Description": "Ensure That Cloud KMS Cryptokeys Are Not Anonymously or Publicly Accessible",
"Checks": [
"kms_key_not_publicly_accessible"
],
"Attributes": [
{
"Title": "Cloud KMS Cryptokeys Are Not Anonymously or Publicly Accessible",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "The IAM policy on Cloud KMS cryptographic keys should not allow anonymous (allUsers) or public (allAuthenticatedUsers) access to prevent unauthorized key usage.",
"AdditionalInformation": "Granting permissions to allUsers or allAuthenticatedUsers allows anyone to access the cryptographic keys, which can lead to data exposure, unauthorized encryption/decryption operations, or potential key compromise. This is particularly critical if sensitive data is protected using these keys. To maintain data security and compliance, ensure that Cloud KMS cryptographic keys are only accessible to authorized users, groups, or service accounts and do not have public or anonymous access permissions.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.4",
"Description": "Ensure KMS Encryption Keys Are Rotated Within a Period of 90 Days",
"Checks": [
"kms_key_rotation_enabled"
],
"Attributes": [
{
"Title": "KMS Encryption Keys Are Rotated Within a Period of 90 Days",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "Google Cloud Key Management Service (KMS) organizes cryptographic keys in a hierarchical structure to facilitate secure and efficient access control. Keys should be configured with a defined rotation schedule to ensure their cryptographic strength is maintained over time.",
"AdditionalInformation": "Key rotation ensures that new key versions are automatically generated at regular intervals, reducing the risk of key compromise and unauthorized access. The key material (actual encryption bits) changes over time, even though the key's logical identity remains the same. Since cryptographic keys protect sensitive data, setting a specific rotation period ensures that encrypted data remains secure, minimizes the impact of a potential key leak, and aligns with best security practices.",
"LevelOfRisk": 5
}
]
},
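The rotation requirement above can be verified directly against Cloud KMS. A minimal sketch, assuming the `google-cloud-kms` package, hypothetical project/location/key-ring names, and that the client surfaces `rotation_period` as a `datetime.timedelta` (the behavior of current proto-plus clients):

```python
from datetime import timedelta

from google.cloud import kms  # pip install google-cloud-kms

# Hypothetical coordinates; a full audit would enumerate all locations and key rings.
PROJECT_ID, LOCATION, KEY_RING = "my-project", "global", "my-key-ring"

client = kms.KeyManagementServiceClient()
parent = client.key_ring_path(PROJECT_ID, LOCATION, KEY_RING)

for key in client.list_crypto_keys(request={"parent": parent}):
    # rotation_period is a timedelta; a zero value means no schedule is set.
    if not key.rotation_period or key.rotation_period > timedelta(days=90):
        print(f"FAIL: {key.name} is not rotated at least every 90 days")
```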
{
"Id": "1.2.5",
"Description": "Ensure That Separation of Duties Is Enforced While Assigning KMS Related Roles to Users",
"Checks": [
"iam_role_kms_enforce_separation_of_duties"
],
"Attributes": [
{
"Title": "Separation of Duties Is Enforced While Assigning KMS Related Roles to Users",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "The principle of Separation of Duties should be enforced when assigning Google Cloud Key Management Service (KMS) roles to users. This prevents excessive privileges and reduces security risks.",
"AdditionalInformation": "The Cloud KMS Admin role grants the ability to create, delete, and manage keys, while the Cloud KMS CryptoKey Encrypter/Decrypter, Encrypter, and Decrypter roles control encryption and decryption of data. Granting both administrative and cryptographic privileges to the same user violates the Separation of Duties principle, potentially allowing unauthorized access to sensitive data. To mitigate risks and prevent privilege escalation, no user should hold the Cloud KMS Admin role along with any of the CryptoKey Encrypter/Decrypter roles. Enforcing Separation of Duties helps ensure secure key management and aligns with security best practices.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.2.6",
"Description": "Ensure API Keys Are Restricted to Only APIs That Application Needs Access",
"Checks": [
"apikeys_api_restrictions_configured"
],
"Attributes": [
{
"Title": "API Keys Are Restricted to Only APIs That Application Needs Access",
"Section": "1. IAM",
"SubSection": "1.2 Authorization",
"AttributeDescription": "API keys should only be used when no other authentication method is available, as they pose a higher security risk due to their public visibility. To minimize exposure, API keys should be restricted to access only the specific APIs required by an application.",
"AdditionalInformation": "API keys present several security risks, including: They are simple encrypted strings that can be easily exposed in client-side applications or browsers. They do not authenticate the user or application making API requests. They are often accessible to clients, making them susceptible to discovery and theft. Google recommends using standard authentication methods instead of API keys whenever possible. However, in limited scenarios where API keys are necessary (e.g., mobile applications using Google Cloud Translation API without a backend server), restricting API key access to only the required APIs helps enforce least privilege access and reduces attack surfaces.",
"LevelOfRisk": 4
}
]
},
{
"Id": "1.3.1",
"Description": "Ensure That IAM Users Are Not Assigned the Service Account User or Service Account Token Creator Roles at Project Level",
"Checks": [
"iam_no_service_roles_at_project_level"
],
"Attributes": [
{
"Title": "IAM Users Are Not Assigned the SA User or SA Token Creator Roles at Project Level",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "It is recommended to assign the Service Account User (iam.serviceAccountUser) and Service Account Token Creator (iam.serviceAccountTokenCreator) roles to users at the service account level rather than granting them project-wide access.",
"AdditionalInformation": "Service accounts are identities used by applications and virtual machines (VMs) to interact with Google Cloud APIs. They also function as resources with IAM policies defining who can use them. Granting service account permissions at the project level allows users to access all service accounts within the project, including any created in the future. This increases the risk of privilege escalation, as users with Compute Instance Admin or App Engine Deployer roles could execute code as a service account, gaining access to additional resources. To enforce the principle of least privilege, users should be assigned service account roles at the specific service account level rather than at the project level. This ensures that each user has access only to the necessary service accounts while preventing unintended privilege escalation.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.2",
"Description": "Ensure That Separation of Duties Is Enforced While Assigning Service Account Related Roles to Users",
"Checks": [
"iam_role_kms_enforce_separation_of_duties"
],
"Attributes": [
{
"Title": "Separation of Duties Is Enforced While Assigning Service Account Related Roles to Users",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "It is recommended to enforce the principle of Separation of Duties when assigning service account-related IAM roles to users to prevent excessive privileges and security risks.",
"AdditionalInformation": "The Service Account Admin role allows a user to create, delete, and manage service accounts, while the Service Account User role allows a user to assign service accounts to applications or compute instances. Granting both roles to the same user violates the Separation of Duties principle, as it would allow an individual to create and assign service accounts, potentially leading to unauthorized access or privilege escalation. To minimize security risks, no user should be assigned both Service Account Admin and Service Account User roles simultaneously. Enforcing Separation of Duties ensures better access control, reduces the risk of privilege abuse, and aligns with security best practices.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.3",
"Description": "Ensure That Cloud Audit Logging Is Configured Properly",
"Checks": [
"iam_audit_logs_enabled"
],
"Attributes": [
{
"Title": "Cloud Audit Logging Is Configured Properly",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "Cloud Audit Logging should be configured to track all administrative activities and read/write access to user data. This ensures comprehensive visibility into who accessed or modified resources within a project, folder, or organization.",
"AdditionalInformation": "Cloud Audit Logging maintains two types of audit logs: 1. Admin Activity Logs: Captures API calls and administrative actions that modify configurations or metadata. These logs are enabled by default and cannot be disabled. 2. Data Access Logs: Tracks API calls that create, modify, or read user data. These logs are disabled by default and should be enabled for better monitoring. Data Access Logs provide three types of visibility: Admin Read: Tracks metadata or configuration reads. Data Read: Logs operations where user-provided data is accessed. Data Write: Captures modifications to user-provided data. To ensure effective logging, it is recommended to: 1. Enable DATA_READ logs (for user activity tracking) and DATA_WRITE logs (to track modifications). 2. Apply audit logging to all supported services where Data Access logs are available. 3. Avoid exempting users from audit logs to maintain full tracking capabilities. Properly configuring Cloud Audit Logging helps strengthen security, detect unauthorized access, and ensure compliance with security policies.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.4",
"Description": "Ensure Log Metric Filter and Alerts Exist for Project Ownership Assignments/Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_project_ownership_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for Project Ownership Assignments/Changes",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "In order to prevent unnecessary project ownership assignments to users or service accounts and mitigate potential misuse of projects and resources, all role assignments to roles/Owner should be monitored. Users or service accounts assigned the roles/Owner primitive role are considered project owners. The Owner role grants full control over the project, including: full viewer permissions on all GCP services, permissions to modify the state of all services, manage roles and permissions for the project and its resources, and set up billing for the project. Granting the Owner role allows the member to modify the IAM policy, which contains sensitive access control data. To minimize security risks, the Owner role should only be assigned when strictly necessary, and the number of users with this role should be kept to a minimum.",
"AdditionalInformation": "Project ownership has the highest level of privileges within a project, making it a high-risk role if misused. To reduce potential security risks, all project ownership assignments and changes should be monitored and alerted to security teams or relevant recipients. Critical events to monitor include: sending project ownership invitations, acceptance or rejection of ownership invites, assigning the roles/Owner role to a user or service account, and removing a user or service account from the roles/Owner role. Monitoring these activities helps prevent unauthorized access, enforces least privilege principles, and improves security auditing and compliance.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.5",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for Audit Configuration Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_audit_configuration_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for Audit Configuration Changes",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "Google Cloud Platform (GCP) services generate audit log entries in the Admin Activity and Data Access logs, providing visibility into who performed what action, where, and when within GCP projects. These logs capture key details such as the identity of the API caller, timestamp, source IP address, request parameters, and response data. Cloud audit logging records API calls made through the GCP Console, SDKs, command-line tools, and other GCP services, offering a comprehensive activity history for security monitoring and compliance.",
"AdditionalInformation": "Admin activity and data access logs play a critical role in security analysis, resource change tracking, and compliance auditing. Configuring metric filters and alerts for audit configuration changes ensures that audit logging remains in its recommended state, allowing organizations to detect and respond to unauthorized modifications while ensuring all project activities remain fully auditable at any time.",
"LevelOfRisk": 5
}
]
},
{
"Id": "1.3.6",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for Custom Role Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_custom_role_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for Custom Role Changes",
"Section": "1. IAM",
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "It is recommended to set up a metric filter and alarm to track changes to Identity and Access Management (IAM) roles, including their creation, deletion, and updates. Google Cloud IAM provides predefined roles for granular access control but also allows organizations to create custom roles to meet specific needs.",
"AdditionalInformation": "IAM role modifications can impact security by granting excessive privileges if not properly managed. Monitoring role creation, deletion, and updates helps detect potential misconfigurations or over-privileged roles early, ensuring that only intended access permissions are assigned within the organization.",
"LevelOfRisk": 4
}
]
},
{
"Id": "2.1.1",
"Description": "Ensure That the Default Network Does Not Exist in a Project",
"Checks": [
"compute_network_default_in_use"
],
"Attributes": [
{
"Title": "Default Network Does Not Exist in a Project",
"Section": "2. Attack Surface",
"SubSection": "2.1 Network",
"AttributeDescription": "A project should not have a default network to prevent the use of preconfigured and potentially insecure network settings.",
"AdditionalInformation": "The default network automatically creates permissive firewall rules, including unrestricted internal traffic, SSH, RDP, and ICMP access, which increases the risk of unauthorized access. Additionally, it is an auto mode network, limiting flexibility in subnet configuration and restricting the use of Cloud VPN or VPC Network Peering. Organizations should create a custom network tailored to their security and networking needs and remove the default network to minimize exposure.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.1.2",
"Description": "Ensure Legacy Networks Do Not Exist for Older Projects",
"Checks": [
"compute_network_not_legacy"
],
"Attributes": [
{
"Title": "Legacy Networks Do Not Exist for Older Projects",
"Section": "2. Attack Surface",
"SubSection": "2.1 Network",
"AttributeDescription": "Projects should not have a legacy network configured to prevent the use of outdated and inflexible networking models. While new projects can no longer create legacy networks, older projects should be checked to ensure they are not still using them.",
"AdditionalInformation": "Legacy networks use a single global IPv4 prefix and a single gateway IP for the entire network, lacking subnetting capabilities. This design limits flexibility, prevents migration to auto or custom subnet networks, and can create performance bottlenecks or single points of failure for high-traffic workloads. Removing legacy networks and transitioning to modern networking models improves scalability, security, and resilience.",
"LevelOfRisk": 1
}
]
},
{
"Id": "2.1.4",
"Description": "Ensure That SSH Access Is Restricted From the Internet",
"Checks": [
"compute_firewall_ssh_access_from_the_internet_allowed"
],
"Attributes": [
{
"Title": "SSH Access Is Restricted From the Internet",
"Section": "2. Attack Surface",
"SubSection": "2.1 Network",
"AttributeDescription": "GCP Firewall Rules control ingress and egress traffic within a VPC Network. These rules define traffic conditions such as ports, protocols, and source/destination IPs. Firewall rules operate at the VPC level and cannot be shared across networks. Only IPv4 addresses are supported, and it is crucial to restrict generic (0.0.0.0/0) incoming traffic, particularly for SSH on Port 22, to prevent unauthorized access.",
"AdditionalInformation": "Firewall rules regulate traffic flow between instances and external networks. Allowing unrestricted inbound SSH access (0.0.0.0/0 on port 22) increases security risks by exposing instances to unauthorized access and brute-force attacks. To minimize threats, internet-facing access should be limited by specifying granular IP ranges and enforcing least privilege access.",
"LevelOfRisk": 5
}
]
},
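The firewall condition described above (ingress from 0.0.0.0/0 reaching TCP port 22) can be spot-checked with the Compute REST API. A minimal sketch, assuming `google-api-python-client` and a hypothetical `PROJECT_ID`; port-range entries such as "20-30" are not expanded here:

```python
from googleapiclient import discovery  # pip install google-api-python-client

PROJECT_ID = "my-project"  # hypothetical project ID

compute = discovery.build("compute", "v1")
firewalls = compute.firewalls().list(project=PROJECT_ID).execute().get("items", [])

for rule in firewalls:
    if rule.get("direction") != "INGRESS" or rule.get("disabled"):
        continue
    if "0.0.0.0/0" not in rule.get("sourceRanges", []):
        continue
    for allowed in rule.get("allowed", []):
        ports = allowed.get("ports", [])  # an empty list means all ports
        if allowed.get("IPProtocol") in ("tcp", "all") and (not ports or "22" in ports):
            print(f"FAIL: {rule['name']} exposes SSH to the internet")
```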
{
"Id": "2.1.5",
"Description": "Ensure That RDP Access Is Restricted From the Internet",
"Checks": [
"compute_firewall_rdp_access_from_the_internet_allowed"
],
"Attributes": [
{
"Title": "RDP Access Is Restricted From the Internet",
"Section": "2. Attack Surface",
"SubSection": "2.1 Network",
"AttributeDescription": "GCP Firewall Rules control incoming (ingress) and outgoing (egress) traffic within a VPC Network. Each rule specifies traffic conditions, including ports, protocols, and source/destination IPs. These rules operate at the VPC level, cannot be shared across networks, and support only IPv4 addresses. To enhance security, unrestricted RDP access (0.0.0.0/0 on port 3389) should be avoided to prevent unauthorized remote connections.",
"AdditionalInformation": "Firewall rules regulate traffic flow between instances and external networks. Allowing unrestricted RDP access from the Internet exposes virtual machines (VMs) to unauthorized access and brute-force attacks. To mitigate risks, internet-facing access should be restricted by enforcing least privilege access, defining specific IP ranges, and implementing secure remote access solutions such as Bastion hosts or VPNs.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.2.1",
"Description": "Ensure That Cloud Storage Bucket Is Not Anonymously or Publicly Accessible",
"Checks": [
"cloudstorage_bucket_public_access"
],
"Attributes": [
{
"Title": "Cloud Storage Bucket Is Not Anonymously or Publicly Accessible",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "IAM policies on Cloud Storage buckets should not allow anonymous or public access to prevent unauthorized data exposure.",
"AdditionalInformation": "Granting public or anonymous access allows anyone to access the bucket's contents, posing a security risk, especially if sensitive data is stored. Restricting access ensures that only authorized users can interact with the bucket, reducing the risk of data breaches.",
"LevelOfRisk": 5
}
]
},
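The bucket condition above boils down to scanning IAM bindings for `allUsers` or `allAuthenticatedUsers`. A minimal sketch, assuming the `google-cloud-storage` package and application-default credentials:

```python
from google.cloud import storage  # pip install google-cloud-storage

PUBLIC_MEMBERS = {"allUsers", "allAuthenticatedUsers"}

client = storage.Client()  # project inferred from the environment
for bucket in client.list_buckets():
    policy = bucket.get_iam_policy(requested_policy_version=3)
    for binding in policy.bindings:
        exposed = PUBLIC_MEMBERS & set(binding["members"])
        if exposed:
            print(f"FAIL: {bucket.name} grants {binding['role']} to {sorted(exposed)}")
```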
{
"Id": "2.2.2",
"Description": "Ensure 'user Connections' Database Flag for Cloud Sql Sql Server Instance Is Set to a Non-limiting Value",
"Checks": [
"cloudsql_instance_sqlserver_user_connections_flag"
],
"Attributes": [
{
"Title": "user Connections Database Flag for Cloud Sql Sql Server Instance Is Set to a Non-limiting Value",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Verify the user connection limits for Cloud SQL SQL Server instances to ensure they are not unnecessarily restricting the number of simultaneous connections.",
"AdditionalInformation": "The user connections setting controls the maximum number of concurrent user connections allowed on an SQL Server instance. By default, SQL Server dynamically adjusts the number of connections as needed, up to a maximum of 32,767. Setting an artificial limit may prevent new connections from being established, leading to potential data loss or service outages. It is recommended to review and adjust this setting as necessary to avoid disruptions.",
"LevelOfRisk": 2
}
]
},
{
"Id": "2.2.3",
"Description": "Ensure 'remote access' database flag for Cloud SQL SQL Server instance is set to 'off'",
"Checks": [
"cloudsql_instance_sqlserver_remote_access_flag"
],
"Attributes": [
{
"Title": "remote access database flag for Cloud SQL SQL Server instance is set to off",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Disable the remote access database flag for Cloud SQL SQL Server instances to prevent execution of stored procedures from remote servers.",
"AdditionalInformation": "The remote access option allows stored procedures to be executed from or on remote SQL Server instances. By default, this setting is enabled, which could be exploited for unauthorized query execution or Denial-of-Service (DoS) attacks by offloading processing to a target server. Disabling remote access enhances security by restricting stored procedure execution to the local server, reducing potential attack vectors. This recommendation applies to SQL Server database instances.",
"LevelOfRisk": 2
}
]
},
{
"Id": "2.2.5",
"Description": "Ensure That Cloud SQL Database Instances Do Not Implicitly Whitelist All Public IP Addresses",
"Checks": [
"cloudsql_instance_public_access"
],
"Attributes": [
{
"Title": "Cloud SQL Database Instances Do Not Implicitly Whitelist All Public IP Addresses",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Restrict database server access to only trusted networks and IP addresses, preventing connections from public IPs.",
"AdditionalInformation": "Allowing unrestricted access to a database server increases the risk of unauthorized access and attacks. To minimize the attack surface, only trusted and necessary IP addresses should be whitelisted. Authorized networks should not be set to 0.0.0.0/0, which permits connections from anywhere. This control applies specifically to instances with public IP addresses.",
"LevelOfRisk": 5
}
]
},
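The 0.0.0.0/0 condition above can be read straight from each instance's `ipConfiguration` in the SQL Admin API. A minimal sketch, assuming `google-api-python-client` and a hypothetical `PROJECT_ID`:

```python
from googleapiclient import discovery  # pip install google-api-python-client

PROJECT_ID = "my-project"  # hypothetical project ID

sqladmin = discovery.build("sqladmin", "v1beta4")
instances = sqladmin.instances().list(project=PROJECT_ID).execute().get("items", [])

for instance in instances:
    networks = (
        instance.get("settings", {})
        .get("ipConfiguration", {})
        .get("authorizedNetworks", [])
    )
    if any(net.get("value") == "0.0.0.0/0" for net in networks):
        print(f"FAIL: {instance['name']} whitelists all public IP addresses")
```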
{
"Id": "2.2.6",
"Description": "Ensure That Cloud SQL Database Instances Do Not Have Public IPs",
"Checks": [
"cloudsql_instance_public_access"
],
"Attributes": [
{
"Title": "Cloud SQL Database Instances Do Not Have Public IPs",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Configure Second Generation Cloud SQL instances to use private IPs instead of public IPs.",
"AdditionalInformation": "Using private IPs for Cloud SQL databases enhances security by reducing exposure to external threats. It also improves network performance and lowers latency by keeping traffic within the internal network, minimizing the attack surface of the database.",
"LevelOfRisk": 1
}
]
},
{
"Id": "2.2.7",
"Description": "Ensure That BigQuery Datasets Are Not Anonymously or Publicly Accessible",
"Checks": [
"bigquery_dataset_public_access"
],
"Attributes": [
{
"Title": "BigQuery Datasets Are Not Anonymously or Publicly Accessible",
"Section": "2. Attack Surface",
"SubSection": "2.2 Storage",
"AttributeDescription": "Ensure that IAM policies on BigQuery datasets do not allow anonymous or public access.",
"AdditionalInformation": "Granting access to allUsers or allAuthenticatedUsers permits unrestricted access to the dataset, which can lead to unauthorized data exposure. To protect sensitive information, public or anonymous access should be strictly prohibited.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.3.3",
"Description": "Ensure That Instances Are Not Configured To Use the Default Service Account With Full Access to All Cloud APIs",
"Checks": [
"compute_instance_default_service_account_in_use_with_full_api_access"
],
"Attributes": [
{
"Title": "Instances Are Not Configured To Use the Default Service Account With Full Access to All Cloud APIs",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "To enforce the principle of least privilege and prevent potential privilege escalation, instances should not be assigned the Compute Engine default service account with the scope Allow full access to all Cloud APIs.",
"AdditionalInformation": "Google Compute Engine provides a default service account for instances to access necessary cloud services. This default service account has the Project Editor role, granting broad permissions over most cloud services except billing. When assigned to an instance, it can operate in three modes: 1. Allow default access: Grants minimal required permissions (recommended). 2. Allow full access to all Cloud APIs: Grants excessive access to all cloud services (not recommended). 3. Set access for each API: Allows administrators to specify required APIs (preferred for least privilege). Assigning an instance the Compute Engine default service account with full access to all APIs can expose cloud operations to unauthorized users based on IAM roles. To reduce security risks, instances should use custom service accounts with minimal required permissions.",
"LevelOfRisk": 4
}
]
},
{
"Id": "2.3.4",
"Description": "Ensure Block Project-Wide SSH Keys Is Enabled for VM Instances",
"Checks": [
"compute_instance_block_project_wide_ssh_keys_disabled"
],
"Attributes": [
{
"Title": "Block Project-Wide SSH Keys Is Enabled for VM Instances",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "Instances should use instance-specific SSH keys instead of project-wide SSH keys to enhance security and reduce the risk of unauthorized access.",
"AdditionalInformation": "Project-wide SSH keys are stored in Compute Project metadata and can be used to access all instances within a project. While this simplifies SSH key management, it also increases security risks: if a project-wide SSH key is compromised, all instances in the project could be affected. Using instance-specific SSH keys provides better security by limiting access to individual instances, reducing the attack surface in case of key compromise.",
"LevelOfRisk": 5
}
]
},
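The project-wide key setting lives in instance metadata under the `block-project-ssh-keys` key. A minimal sketch, assuming `google-api-python-client` and hypothetical project/zone values; a full audit would iterate every zone:

```python
from googleapiclient import discovery  # pip install google-api-python-client

PROJECT_ID, ZONE = "my-project", "us-central1-a"  # hypothetical coordinates

compute = discovery.build("compute", "v1")
instances = (
    compute.instances().list(project=PROJECT_ID, zone=ZONE).execute().get("items", [])
)

for instance in instances:
    items = instance.get("metadata", {}).get("items", [])
    flags = {item["key"]: item.get("value", "") for item in items}
    # Project-wide keys are blocked only when this metadata value is truthy.
    if flags.get("block-project-ssh-keys", "").lower() != "true":
        print(f"FAIL: {instance['name']} accepts project-wide SSH keys")
```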
{
"Id": "2.3.5",
"Description": "Ensure Enable Connecting to Serial Ports Is Not Enabled for VM Instance",
"Checks": [
"compute_instance_serial_ports_in_use"
],
"Attributes": [
{
"Title": "Enable Connecting to Serial Ports Is Not Enabled for VM Instance",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "The interactive serial console allows direct access to a virtual machine's serial ports, similar to using a terminal window. When enabled, it allows connections from any IP address, creating a potential security risk. It is recommended to disable interactive serial console support.",
"AdditionalInformation": "A virtual machine instance has four virtual serial ports, often used by the operating system, BIOS, or other system-level entities for input and output. The first serial port (serial port 1) is commonly referred to as the serial console. Unlike SSH, the interactive serial console does not support IP-based access restrictions, meaning anyone with the correct SSH key, username, project ID, zone, and instance name could gain access. This exposes the instance to unauthorized access. To mitigate this risk, interactive serial console support should be disabled unless absolutely necessary.",
"LevelOfRisk": 5
}
]
},
{
"Id": "2.3.6",
"Description": "Ensure That IP Forwarding Is Not Enabled on Instances",
"Checks": [
"compute_instance_ip_forwarding_is_enabled"
],
"Attributes": [
{
"Title": "IP Forwarding Is Not Enabled on Instances",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "Google Compute Engine instances should not forward data packets unless explicitly required for routing purposes. By default, an instance cannot forward packets unless the source IP matches the instance's IP address. Similarly, GCP won't deliver packets if the destination IP does not match the instance. To prevent unauthorized data forwarding, it is recommended to disable IP forwarding.",
"AdditionalInformation": "When IP forwarding is enabled (canIpForward field), an instance can send and receive packets with non-matching source or destination IPs, effectively allowing it to act as a network router. This can lead to data loss, information disclosure, or unauthorized traffic routing. To maintain security and prevent misuse, IP forwarding should be disabled unless explicitly required for network routing configurations.",
"LevelOfRisk": 2
}
]
},
{
"Id": "2.3.7",
"Description": "Ensure Compute Instances Are Launched With Shielded VM Enabled",
"Checks": [
"compute_instance_shielded_vm_enabled"
],
"Attributes": [
{
"Title": "Compute Instances Are Launched With Shielded VM Enabled",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "Shielded VMs are hardened virtual machines on Google Cloud Platform (GCP) designed to protect against rootkits, bootkits, and other low-level attacks. They ensure verifiable integrity using Secure Boot, virtual Trusted Platform Module (vTPM)-enabled Measured Boot, and integrity monitoring.",
"AdditionalInformation": "Shielded VMs use signed and verified firmware from Google's Certificate Authority to establish a root of trust. Secure Boot ensures only authentic software runs by verifying digital signatures, preventing unauthorized modifications. Integrity monitoring helps detect unexpected changes in the VM's boot process, while vTPM-enabled Measured Boot provides a baseline to compare against future boots. Enabling Shielded VMs enhances security by protecting against malware, unauthorized firmware changes, and persistent threats.",
"LevelOfRisk": 4
}
]
},
{
"Id": "2.3.8",
"Description": "Ensure That Compute Instances Do Not Have Public IP Addresses",
"Checks": [
"compute_instance_public_ip"
],
"Attributes": [
{
"Title": "Compute Instances Do Not Have Public IP Addresses",
"Section": "2. Attack Surface",
"SubSection": "2.3 Application",
"AttributeDescription": "Compute instances should not be assigned external IP addresses to minimize exposure to the internet and reduce security risks.",
"AdditionalInformation": "Public IP addresses increase the attack surface of Compute instances, making them more vulnerable to threats. Instead, instances should be placed behind load balancers or use private networking to control access and reduce the risk of unauthorized exposure.",
"LevelOfRisk": 1
}
]
},
{
"Id": "3.1.1",
"Description": "Ensure That Sinks Are Configured for All Log Entries",
"Checks": [
"cloudstorage_bucket_log_retention_policy_lock"
],
"Attributes": [
{
"Title": "Sinks Are Configured for All Log Entries",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to create a log sink to export and store copies of all log entries. This enables log aggregation across multiple projects and allows integration with a Security Information and Event Management (SIEM) system for centralized monitoring.",
"AdditionalInformation": "Cloud Logging retains logs for a limited period. To ensure long-term storage and better security analysis, logs should be exported to a destination such as Cloud Storage, BigQuery, or Cloud Pub/Sub. A log sink allows you to: Aggregate logs from multiple projects, folders, or billing accounts. Extend log retention beyond Cloud Logging's default retention period. Send logs to a SIEM system for real-time monitoring and threat detection. To ensure all logs are captured and exported: 1. Create a sink without filters to capture all log entries. 2. Choose an appropriate destination (e.g., Cloud Storage for long-term storage, BigQuery for analysis, or Pub/Sub for real-time processing). 3. Apply logging at the organization level to cover all associated projects. Implementing log sinks enhances security visibility, forensic capabilities, and compliance adherence across cloud environments.",
"LevelOfRisk": 4
}
]
},
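The sink condition above (at least one filterless, catch-all sink) can be checked with the Cloud Logging client. A minimal sketch, assuming the `google-cloud-logging` package and application-default credentials:

```python
from google.cloud import logging as gcp_logging  # pip install google-cloud-logging

client = gcp_logging.Client()  # project inferred from the environment
sinks = list(client.list_sinks())

# A sink with an empty filter exports every log entry to its destination.
catch_all = [s for s in sinks if not s.filter_]
for sink in catch_all:
    print(f"PASS: sink {sink.name} exports all entries to {sink.destination}")
if not catch_all:
    print("FAIL: no catch-all log sink is configured")
```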
{
"Id": "3.1.2",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for VPC Network Firewall Rule Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_vpc_firewall_rule_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for VPC Network Firewall Rule Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to configure a metric filter and alarm to monitor Virtual Private Cloud (VPC) Network Firewall rule changes. Tracking modifications to firewall rules helps ensure that unauthorized or unintended changes do not compromise network security.",
"AdditionalInformation": "Firewall rules control ingress and egress traffic within a VPC. Monitoring create or update events provides visibility into network access changes and helps quickly detect potential security threats or misconfigurations, reducing the risk of unauthorized access.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.3",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for VPC Network Route Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_vpc_network_route_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for VPC Network Route Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to configure a metric filter and alarm to monitor Virtual Private Cloud (VPC) network route changes. Keeping track of modifications ensures that unauthorized or unintended changes do not disrupt expected network traffic flow.",
"AdditionalInformation": "GCP routes define how network traffic is directed between VM instances and external destinations. Monitoring route table changes helps ensure that traffic follows the intended path, preventing misconfigurations or malicious alterations that could lead to data exposure or connectivity issues.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.4",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for VPC Network Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_vpc_network_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for VPC Network Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to configure a metric filter and alarm to monitor Virtual Private Cloud (VPC) network changes. This helps track modifications to VPC configurations and peer connections, ensuring that network traffic remains secure and follows the intended paths.",
"AdditionalInformation": "It is recommended to configure a metric filter and alarm to monitor Virtual Private Cloud (VPC) network changes. This helps track modifications to VPC configurations and peer connections, ensuring that network traffic remains secure and follows the intended paths.",
"LevelOfRisk": 4
}
]
},
{
"Id": "3.1.5",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for Cloud Storage IAM Permission Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_bucket_permission_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for Cloud Storage IAM Permission Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to set up a metric filter and alarm to monitor Cloud Storage Bucket IAM changes. This ensures that any modifications to bucket permissions are tracked and reviewed in a timely manner.",
"AdditionalInformation": "Monitoring changes to Cloud Storage IAM policies helps detect and correct unauthorized access or overly permissive configurations. This reduces the risk of data exposure or breaches by ensuring that sensitive storage buckets and their contents remain properly secured.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.6",
"Description": "Ensure That the Log Metric Filter and Alerts Exist for SQL Instance Configuration Changes",
"Checks": [
"logging_log_metric_filter_and_alert_for_sql_instance_configuration_changes_enabled"
],
"Attributes": [
{
"Title": "Log Metric Filter and Alerts Exist for SQL Instance Configuration Changes",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "It is recommended to configure a metric filter and alarm to track SQL instance configuration changes. This helps in detecting and addressing misconfigurations that may impact security, availability, and compliance.",
"AdditionalInformation": "Monitoring SQL instance configuration changes ensures that critical security settings remain properly configured. Misconfigurations, such as disabling auto backups, allowing untrusted networks, or modifying high availability settings, can lead to data loss, security vulnerabilities, or operational disruptions. Early detection of such changes helps maintain a secure and resilient SQL environment.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.7",
"Description": "Ensure Logging is enabled for HTTP(S) Load Balancer",
"Checks": [
"compute_loadbalancer_logging_enabled"
],
"Attributes": [
{
"Title": "Logging is enabled for HTTP(S) Load Balancer",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "Enabling logging on an HTTPS Load Balancer captures all network traffic and its destination, providing visibility into requests made to your web applications.",
"AdditionalInformation": "Logging HTTPS network traffic helps monitor access patterns, troubleshoot issues, and enhance security by detecting suspicious activity or unauthorized access attempts.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.8",
"Description": "Ensure that VPC Flow Logs is Enabled for Every Subnet in a VPC Network",
"Checks": [
"compute_subnet_flow_logs_enabled"
],
"Attributes": [
{
"Title": "VPC Flow Logs is Enabled for Every Subnet in a VPC Network",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "Flow Logs capture and record IP traffic to and from network interfaces within VPC subnets. These logs are stored in Stackdriver Logging, allowing users to analyze traffic patterns, detect anomalies, and optimize network performance. It is recommended to enable Flow Logs for all critical VPC subnets to enhance network visibility and security.",
"AdditionalInformation": "VPC Flow Logs provide detailed insights into inbound and outbound traffic for virtual machines (VMs), whether they communicate with other VMs, on-premises data centers, Google services, or external networks. Enabling Flow Logs supports: network monitoring, traffic analysis and cost optimization, incident investigation and forensics, and real-time security threat detection. For effective monitoring, Flow Logs should be configured to capture all traffic, use granular logging intervals, avoid log filtering, and include metadata for detailed investigations. Note that subnets reserved for internal HTTP(S) load balancing do not support Flow Logs.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.9",
"Description": "Ensure That the Log_connections Database Flag for Cloud SQL PostgreSQL Instance Is Set to On",
"Checks": [
"cloudsql_instance_postgres_log_connections_flag"
],
"Attributes": [
{
"Title": "Log_connections Database Flag for Cloud SQL PostgreSQL Instance Is Set to On",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_connections setting should be enabled to log all attempted connections to the PostgreSQL server, including successful client authentication.",
"AdditionalInformation": "By default, PostgreSQL does not log connection attempts, making it harder to detect unauthorized access. Enabling log_connections provides visibility into all connection attempts, aiding in troubleshooting and identifying unusual or suspicious access patterns. This is particularly useful for security monitoring and incident response.",
"LevelOfRisk": 5
}
]
},
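Database flags such as `log_connections` are visible in the instance settings returned by the SQL Admin API. A minimal sketch, assuming `google-api-python-client` and a hypothetical `PROJECT_ID`; the same pattern covers the other PostgreSQL flag requirements in this subsection:

```python
from googleapiclient import discovery  # pip install google-api-python-client

PROJECT_ID = "my-project"  # hypothetical project ID

sqladmin = discovery.build("sqladmin", "v1beta4")
instances = sqladmin.instances().list(project=PROJECT_ID).execute().get("items", [])

for instance in instances:
    if not instance.get("databaseVersion", "").startswith("POSTGRES"):
        continue
    flags = {
        f["name"]: f.get("value")
        for f in instance.get("settings", {}).get("databaseFlags", [])
    }
    # The flag is absent by default, which PostgreSQL treats as "off".
    if flags.get("log_connections") != "on":
        print(f"FAIL: {instance['name']} does not log connection attempts")
```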
{
"Id": "3.1.10",
"Description": "Ensure That the Log_disconnections Database Flag for Cloud SQL PostgreSQL Instance Is Set to On",
"Checks": [
"cloudsql_instance_postgres_log_disconnections_flag"
],
"Attributes": [
{
"Title": "Log_disconnections Database Flag for Cloud SQL PostgreSQL Instance Is Set to On",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_disconnections setting should be enabled to log the end of each PostgreSQL session, including session duration.",
"AdditionalInformation": "By default, PostgreSQL does not log session termination details, making it difficult to track session activity. Enabling log_disconnections helps monitor session durations and detect unusual activity. Combined with log_connections, it provides a complete audit trail of user access, aiding in troubleshooting and security monitoring.",
"LevelOfRisk": 4
}
]
},
{
"Id": "3.1.11",
"Description": "Ensure Log_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set Appropriately",
"Checks": [
"cloudsql_instance_postgres_log_statement_flag"
],
"Attributes": [
{
"Title": "Log_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set Appropriately",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_statement setting in PostgreSQL determines which SQL statements are logged. Acceptable values include none, ddl, mod, and all. A recommended setting is ddl, which logs all data definition statements unless otherwise specified by the organization's logging policy.",
"AdditionalInformation": "Proper SQL statement logging is crucial for auditing and forensic analysis. If too many statements are logged, it can become difficult to extract relevant information; if too few are logged, critical details may be missing. Setting log_statement to an appropriate value, such as ddl, ensures a balance between comprehensive auditing and log manageability, aiding in database security and compliance.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.12",
"Description": "Ensure that the Log_min_messages Flag for a Cloud SQL PostgreSQL Instance is set at minimum to 'Warning'",
"Checks": [
"cloudsql_instance_postgres_log_min_messages_flag"
],
"Attributes": [
{
"Title": "Log_min_messages Flag for a Cloud SQL PostgreSQL Instance is set at minimum to Warning",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_min_messages setting in PostgreSQL defines the minimum severity level for messages to be logged as errors. Accepted values range from DEBUG5 (least severe) to PANIC (most severe). Best practice is to set this value to ERROR, ensuring that only critical issues are logged unless an organization's policy requires a different threshold.",
"AdditionalInformation": "Proper logging is essential for troubleshooting and forensic analysis. If log_min_messages is not configured correctly, important error messages may be missed or unnecessary logs may clutter records. Setting this parameter to ERROR helps maintain a balance between capturing relevant issues and avoiding excessive log noise, improving system monitoring and security.",
"LevelOfRisk": 5
}
]
},
{
"Id": "3.1.13",
"Description": "Ensure Log_min_error_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set to Error or Stricter",
"Checks": [
"cloudsql_instance_postgres_log_min_error_statement_flag"
],
"Attributes": [
{
"Title": "Log_min_error_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set to Error or Stricter",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_min_error_statement setting in PostgreSQL defines the minimum severity level for statements to be logged as errors. Valid values range from DEBUG5 (least severe) to PANIC (most severe). It is recommended to set this value to ERROR or stricter to ensure only relevant error statements are logged.",
"AdditionalInformation": "Proper logging aids in troubleshooting and forensic analysis. If log_min_error_statement is set too leniently, excessive log entries may make it difficult to identify actual errors. Conversely, if it is set too strictly, important errors may be missed. Setting this parameter to ERROR or higher ensures that significant issues are recorded while avoiding unnecessary log clutter.",
"LevelOfRisk": 3
}
]
},
{
"Id": "3.1.14",
"Description": "Ensure That the Log_min_duration_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set to '-1' (Disabled)",
"Checks": [
"cloudsql_instance_postgres_log_min_duration_statement_flag"
],
"Attributes": [
{
"Title": "Log_min_duration_statement Database Flag for Cloud SQL PostgreSQL Instance Is Set to -1 (Disabled)",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "The log_min_duration_statement setting in PostgreSQL determines the minimum execution time (in milliseconds) required for a statement to be logged. It is recommended to disable this setting by setting its value to -1.",
"AdditionalInformation": "Logging SQL statements may expose sensitive information, which could lead to security risks if recorded in logs. Disabling this setting ensures that confidential data is not inadvertently captured. This recommendation applies to PostgreSQL database instances.",
"LevelOfRisk": 3
}
]
},
{
"Id": "3.1.15",
"Description": "Ensure That 'cloudsql.enable_pgaudit' Database Flag for each Cloud Sql Postgresql Instance Is Set to 'on' For Centralized Logging",
"Checks": [
"cloudsql_instance_postgres_enable_pgaudit_flag"
],
"Attributes": [
{
"Title": "cloudsql.enable_pgaudit Database Flag for each Cloud Sql Postgresql Instance Is Set to on For Centralized Logging",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "Ensure that the cloudsql.enable_pgaudit database flag is set to on for Cloud SQL PostgreSQL instances to enable centralized logging and auditing.",
"AdditionalInformation": "Enabling the pgaudit extension provides detailed session and object-level logging, which helps organizations comply with security standards such as government, financial, and ISO regulations. This logging capability enhances threat detection by monitoring security events on the database instance. Additionally, enabling this flag allows logs to be sent to Google Logs Explorer for centralized access and monitoring. This recommendation applies specifically to PostgreSQL database instances.",
"LevelOfRisk": 4
}
]
},
{
"Id": "3.2.1",
"Description": "Ensure That Retention Policies on Cloud Storage Buckets Used for Exporting Logs Are Configured Using Bucket Lock",
"Checks": [
"cloudstorage_bucket_log_retention_policy_lock"
],
"Attributes": [
{
"Title": "Retention Policies on Cloud Storage Buckets Used for Exporting Logs Are Configured Using Bucket Lock",
"Section": "3. Logging and Monitoring",
"SubSection": "3.2 Retention",
"AttributeDescription": "Enabling retention policies on log storage buckets prevents logs from being overwritten or accidentally deleted. It is recommended to configure retention policies and enable Bucket Lock for all storage buckets used as log sinks.",
"AdditionalInformation": "Cloud Logging allows logs to be exported to storage buckets through sinks. Without a retention policy, logs can be altered or deleted, making it difficult to perform security investigations or comply with audit requirements. To ensure logs remain intact for forensics and security analysis: 1. Set a retention policy on log storage buckets to prevent early deletion. 2. Enable Bucket Lock to make the policy immutable, ensuring logs cannot be altered even by privileged users. 3. Apply appropriate access controls to protect logs from unauthorized access. By implementing retention policies and Bucket Lock, organizations preserve critical security logs, prevent attackers from covering their tracks, and enhance compliance with security regulations.",
"LevelOfRisk": 5
}
]
},
|
||||
{
|
||||
"Id": "3.3.1",
|
||||
"Description": "Ensure Cloud Asset Inventory Is Enabled",
|
||||
"Checks": [
|
||||
"iam_cloud_asset_inventory_enabled"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "Cloud Asset Inventory Enabled",
|
||||
"Section": "3. Logging and Monitoring",
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Google Cloud Asset Inventory provides a historical view of GCP resources and IAM policies using a time-series database. It captures metadata on cloud resources, policy configurations, and runtime data. Enabling Cloud Asset Inventory allows for efficient searching and exporting of asset data.",
|
||||
"AdditionalInformation": "Cloud Asset Inventory enhances security analysis, resource change tracking, and compliance auditing by maintaining a detailed history of GCP resources and their configurations. Enabling it across all GCP projects ensures visibility into changes, helping organizations detect misconfigurations, track policy changes, and strengthen security posture.",
|
||||
"LevelOfRisk": 4
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "3.3.2",
|
||||
"Description": "Ensure 'Access Approval' is 'Enabled'",
|
||||
"Checks": [
|
||||
"iam_account_access_approval_enabled"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "Access Aproval Enabled",
|
||||
"Section": "3. Logging and Monitoring",
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "GCP Access Approval allows organizations to require explicit approval before Google support personnel can access their projects. Administrators can assign security roles in IAM to specific users who can review and approve these requests. Notifications of access requests, including the requesting Google employees details, are sent via email or Pub/Sub messages, providing transparency and control.",
|
||||
"AdditionalInformation": "Managing who accesses your organizations data is critical for information security. While Google support may require access for troubleshooting, Access Approval ensures that access is only granted when explicitly authorized. This feature adds an additional layer of security and logging, ensuring that only approved Google personnel can access sensitive information.",
|
||||
"LevelOfRisk": 5
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "4.1.1",
|
||||
"Description": "Ensure That DNSSEC Is Enabled for Cloud DNS ",
|
||||
"Checks": [
|
||||
"dns_dnssec_disabled"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "DNSSEC Is Enabled for Cloud DNS ",
|
||||
"Section": "4. Encryption",
|
||||
"SubSection": "4.1 In-Transit",
|
||||
"AttributeDescription": "Cloud DNS provides a scalable and reliable domain name system (DNS) service. Domain Name System Security Extensions (DNSSEC) enhance DNS security by protecting domains against DNS hijacking, man-in-the-middle attacks, and other threats.",
|
||||
"AdditionalInformation": "DNSSEC cryptographically signs DNS records, ensuring the integrity and authenticity of DNS responses. Without DNSSEC, attackers can manipulate DNS lookups, redirecting users to malicious websites through DNS hijacking or spoofing attacks. Enabling DNSSEC helps prevent unauthorized modifications to DNS records, reducing the risk of phishing, malware distribution, and other cyber threats.",
|
||||
"LevelOfRisk": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "4.1.2",
|
||||
"Description": "Ensure That the Cloud SQL Database Instance Requires All Incoming Connections To Use SSL",
|
||||
"Checks": [
|
||||
"cloudsql_instance_ssl_connections"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "Cloud SQL Database Instance Requires All Incoming Connections To Use SSL",
|
||||
"Section": "4. Encryption",
|
||||
"SubSection": "4.1 In-Transit",
|
||||
"AttributeDescription": "Require all incoming connections to SQL database instances to use SSL encryption.",
|
||||
"AdditionalInformation": "Unencrypted SQL database connections are vulnerable to man-in-the-middle (MITM) attacks, which can expose sensitive data such as credentials, queries, and results. Enforcing SSL ensures secure communication by encrypting data in transit, protecting against interception and unauthorized access. This recommendation applies to PostgreSQL, MySQL (Generation 1 and 2), and SQL Server 2017 instances.",
|
||||
"LevelOfRisk": 5
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "4.1.3",
|
||||
"Description": "Ensure That RSASHA1 Is Not Used for the Key-Signing Key in Cloud DNS DNSSEC",
|
||||
"Checks": [
|
||||
"dns_rsasha1_in_use_to_key_sign_in_dnssec"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "RSASHA1 Is Not Used for the Key-Signing Key in Cloud DNS DNSSEC",
|
||||
"Section": "4. Encryption",
|
||||
"SubSection": "4.1 In-Transit",
|
||||
"AttributeDescription": "DNSSEC (Domain Name System Security Extensions) relies on cryptographic algorithms to ensure the integrity and authenticity of DNS responses. It is important to use strong and recommended algorithms for key signing to maintain robust security. SHA-1 is deprecated and requires explicit approval from Google if used.",
|
||||
"AdditionalInformation": "DNSSEC signing algorithms play a critical role in securing DNS transactions. Using weak or outdated algorithms can expose DNS infrastructure to spoofing, hijacking, and other attacks. Organizations should select recommended and secure algorithms when enabling DNSSEC to protect DNS records from unauthorized modifications. If adjustments to DNSSEC settings are required, DNSSEC must be disabled and re-enabled with the updated configurations.",
|
||||
"LevelOfRisk": 5
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "4.1.4",
|
||||
"Description": "Ensure That RSASHA1 Is Not Used for the Zone-Signing Key in Cloud DNS DNSSEC",
|
||||
"Checks": [
|
||||
"dns_rsasha1_in_use_to_zone_sign_in_dnssec"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "RSASHA1 Is Not Used for the Zone-Signing Key in Cloud DNS DNSSEC",
|
||||
"Section": "4. Encryption",
|
||||
"SubSection": "4.1 In-Transit",
|
||||
"AttributeDescription": "DNSSEC (Domain Name System Security Extensions) enhances DNS security by using cryptographic algorithms for zone signing and transaction security. It is essential to use strong and recommended algorithms for key signing. SHA-1 has been deprecated and requires Googles explicit approval and a support contract if used.",
|
||||
"AdditionalInformation": "Using weak or outdated cryptographic algorithms compromises DNS integrity and exposes systems to threats like spoofing and hijacking. Organizations should ensure that DNSSEC settings use strong, recommended algorithms. If DNSSEC is already enabled and changes are needed, it must be disabled and re-enabled with updated configurations to apply the changes effectively.",
|
||||
"LevelOfRisk": 2
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "4.2.1",
|
||||
"Description": "Ensure VM Disks for Critical VMs Are Encrypted With Customer-Supplied Encryption Keys (CSEK)",
|
||||
"Checks": [
|
||||
"compute_instance_encryption_with_csek_enabled"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "VM Disks for Critical VMs Are Encrypted With Customer-Supplied Encryption Keys (CSEK)",
|
||||
"Section": "4. Encryption",
|
||||
"SubSection": "4.2 At-Rest",
|
||||
"AttributeDescription": "Customer-Supplied Encryption Keys (CSEK) is a feature available in Google Cloud Storage and Google Compute Engine, allowing users to supply their own encryption keys. When you provide your key, Google uses it to protect the Google-generated keys that are responsible for encrypting and decrypting your data. By default, Google Compute Engine encrypts all data at rest automatically, managing this encryption for you with no additional action required. However, if you wish to have full control over the encryption process, you can choose to supply your own encryption keys.",
|
||||
"AdditionalInformation": "By default, Compute Engine automatically encrypts all data at rest, with the service managing the encryption without any further input required from you or your application. However, if you require complete control over encryption, you have the option to provide your own encryption keys to manage the encryption of instance disks.",
|
||||
"LevelOfRisk": 5
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "4.2.2",
|
||||
"Description": "Ensure that Dataproc Cluster is encrypted using Customer-Managed Encryption Key",
|
||||
"Checks": [
|
||||
"dataproc_encrypted_with_cmks_disabled"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "Dataproc Cluster is encrypted using Customer-Managed Encryption Key",
|
||||
"Section": "4. Encryption",
|
||||
"SubSection": "4.2 At-Rest",
|
||||
"AttributeDescription": "When using Dataproc, the data associated with clusters and jobs is stored on Persistent Disks (PDs) linked to the Compute Engine VMs in your cluster and in a Cloud Storage staging bucket. This data is encrypted using a Google-generated Data Encryption Key (DEK) and a Key Encryption Key (KEK). The Customer-Managed Encryption Keys (CMEK) feature allows you to create, use, and revoke the KEK, although Google still controls the DEK used to encrypt the data.",
|
||||
"AdditionalInformation": "Dataproc cluster data is encrypted using Google-managed keys: the Data Encryption Key (DEK) and the Key Encryption Key (KEK). If you wish to have control over the encryption of your cluster data, you can use your own Customer-Managed Keys (CMKs). Cloud KMS Customer-Managed Keys can add an extra layer of security and are commonly used in environments with strict compliance and security requirements.",
|
||||
"LevelOfRisk": 4
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "4.2.3",
|
||||
"Description": "Ensure BigQuery datasets are encrypted with Customer-Managed Keys (CMKs).",
|
||||
"Checks": [
|
||||
"bigquery_dataset_cmk_encryption"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "BigQuery datasets are encrypted with Customer-Managed Keys (CMKs).",
|
||||
"Section": "4. Encryption",
|
||||
"SubSection": "4.2 At-Rest",
|
||||
"AttributeDescription": "Ensure that BigQuery datasets are encrypted using Customer-Managed Keys (CMKs) to gain more granular control over the data encryption and decryption process.",
|
||||
"AdditionalInformation": "For enhanced control over encryption, Customer-Managed Encryption Keys (CMEK) can be implemented as a key management solution for BigQuery datasets.",
|
||||
"LevelOfRisk": 3
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "4.2.4",
|
||||
"Description": "Ensure BigQuery tables are encrypted with Customer-Managed Keys (CMKs).",
|
||||
"Checks": [
|
||||
"bigquery_table_cmk_encryption"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Title": "BigQuery tables are encrypted with Customer-Managed Keys (CMKs).",
|
||||
"Section": "4. Encryption",
|
||||
"SubSection": "4.2 At-Rest",
|
||||
"AttributeDescription": "Ensure that BigQuery tables are encrypted using Customer-Managed Keys (CMKs) for more granular control over the data encryption and decryption process.",
|
||||
"AdditionalInformation": "For greater control over encryption, Customer-Managed Encryption Keys (CMEK) can be utilized as the key management solution for BigQuery tables.",
|
||||
"LevelOfRisk": 3
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -327,7 +327,6 @@ aws:
     [
       "RSA-1024",
       "P-192",
-      "SHA-1",
     ]

   # AWS EKS Configuration

@@ -183,6 +183,18 @@ class KISA_ISMSP_Requirement_Attribute(BaseModel):
     NonComplianceCases: Optional[list[str]]


+# Prowler ThreatScore Requirement Attribute
+class Prowler_ThreatScore_Requirement_Attribute(BaseModel):
+    """Prowler ThreatScore Requirement Attribute"""
+
+    Title: str
+    Section: str
+    SubSection: str
+    AttributeDescription: str
+    AdditionalInformation: str
+    LevelOfRisk: int
+
+
 # Base Compliance Model
 # TODO: move this to compliance folder
 class Compliance_Requirement(BaseModel):

@@ -198,6 +210,7 @@ class Compliance_Requirement(BaseModel):
         ISO27001_2013_Requirement_Attribute,
         AWS_Well_Architected_Requirement_Attribute,
         KISA_ISMSP_Requirement_Attribute,
+        Prowler_ThreatScore_Requirement_Attribute,
         # Generic_Compliance_Requirement_Attribute must be the last one since it is the fallback for generic compliance framework
         Generic_Compliance_Requirement_Attribute,
     ]

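For reference, a minimal sketch of how one attribute object from the ThreatScore JSON above maps onto this new model (values abbreviated from requirement 3.3.1; assumes the import path added by this change):

from prowler.lib.check.compliance_models import (
    Prowler_ThreatScore_Requirement_Attribute,
)

# Long strings abbreviated; structure mirrors the JSON entries above.
attribute = Prowler_ThreatScore_Requirement_Attribute(
    Title="Cloud Asset Inventory Enabled",
    Section="3. Logging and Monitoring",
    SubSection="3.3 Monitoring",
    AttributeDescription="Google Cloud Asset Inventory provides a historical view...",
    AdditionalInformation="Cloud Asset Inventory enhances security analysis...",
    LevelOfRisk=4,
)
assert 1 <= attribute.LevelOfRisk <= 5  # risk scale used by the score table
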
@@ -11,6 +11,9 @@ from prowler.lib.outputs.compliance.kisa_ismsp.kisa_ismsp import get_kisa_ismsp_
 from prowler.lib.outputs.compliance.mitre_attack.mitre_attack import (
     get_mitre_attack_table,
 )
+from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore import (
+    get_prowler_threatscore_table,
+)


 def display_compliance_table(

@@ -72,6 +75,15 @@ def display_compliance_table(
                 output_directory,
                 compliance_overview,
             )
+        elif "threatscore_" in compliance_framework:
+            get_prowler_threatscore_table(
+                findings,
+                bulk_checks_metadata,
+                compliance_framework,
+                output_filename,
+                output_directory,
+                compliance_overview,
+            )
         else:
             get_generic_compliance_table(
                 findings,

@@ -31,14 +31,21 @@ class ComplianceOutput(Output):
         compliance: Compliance,
         file_path: str = None,
         file_extension: str = "",
+        from_cli: bool = True,
     ) -> None:
         # TODO: This class needs to be refactored to use the Output class init, methods and properties
         self._data = []
+        self.close_file = False
+        self.file_path = file_path
+        self.file_descriptor = None
+        # This parameter is to avoid refactoring more code, the CLI does not write in batches, the API does
+        self._from_cli = from_cli

         if not file_extension and file_path:
             self._file_extension = "".join(Path(file_path).suffixes)
         if file_extension:
             self._file_extension = file_extension
+            self.file_path = f"{file_path}{self.file_extension}"

         if findings:
             # Get the compliance name of the model

@@ -49,7 +56,7 @@ class ComplianceOutput(Output):
             )
             self.transform(findings, compliance, compliance_name)
             if not self._file_descriptor and file_path:
-                self.create_file_descriptor(file_path)
+                self.create_file_descriptor(self.file_path)

     def batch_write_data_to_file(self) -> None:
         """

@@ -69,12 +76,14 @@ class ComplianceOutput(Output):
                 fieldnames=[field.upper() for field in self._data[0].dict().keys()],
                 delimiter=";",
             )
-            csv_writer.writeheader()
+            if self._file_descriptor.tell() == 0:
+                csv_writer.writeheader()
             for finding in self._data:
                 csv_writer.writerow(
                     {k.upper(): v for k, v in finding.dict().items()}
                 )
-            self._file_descriptor.close()
+            if self.close_file or self._from_cli:
+                self._file_descriptor.close()
         except Exception as error:
             logger.error(
                 f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"

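The header-only-once logic above keys off the file position; a standalone sketch of the same pattern (field names and file path are illustrative, not Prowler's API):

import csv

# Write the header only when the file is empty, so repeated batch writes
# append rows without duplicating the header (same pattern as the diff above).
with open("compliance.csv", "a", newline="") as fd:
    writer = csv.DictWriter(fd, fieldnames=["STATUS", "CHECKID"], delimiter=";")
    if fd.tell() == 0:  # empty file -> first batch -> emit the header
        writer.writeheader()
    writer.writerow({"STATUS": "PASS", "CHECKID": "example_check"})
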
@@ -0,0 +1,81 @@
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class ProwlerThreatScoreAWSModel(BaseModel):
+    """
+    ProwlerThreatScoreAWSModel generates a finding's output in AWS Prowler ThreatScore Compliance format.
+    """
+
+    Provider: str
+    Description: str
+    AccountId: str
+    Region: str
+    AssessmentDate: str
+    Requirements_Id: str
+    Requirements_Description: str
+    Requirements_Attributes_Title: str
+    Requirements_Attributes_Section: str
+    Requirements_Attributes_SubSection: Optional[str]
+    Requirements_Attributes_AttributeDescription: str
+    Requirements_Attributes_AdditionalInformation: str
+    Requirements_Attributes_LevelOfRisk: int
+    Status: str
+    StatusExtended: str
+    ResourceId: str
+    ResourceName: str
+    CheckId: str
+    Muted: bool
+
+
+class ProwlerThreatScoreAzureModel(BaseModel):
+    """
+    ProwlerThreatScoreAzureModel generates a finding's output in Azure Prowler ThreatScore Compliance format.
+    """
+
+    Provider: str
+    Description: str
+    SubscriptionId: str
+    Location: str
+    AssessmentDate: str
+    Requirements_Id: str
+    Requirements_Description: str
+    Requirements_Attributes_Title: str
+    Requirements_Attributes_Section: str
+    Requirements_Attributes_SubSection: Optional[str]
+    Requirements_Attributes_AttributeDescription: str
+    Requirements_Attributes_AdditionalInformation: str
+    Requirements_Attributes_LevelOfRisk: int
+    Status: str
+    StatusExtended: str
+    ResourceId: str
+    ResourceName: str
+    CheckId: str
+    Muted: bool
+
+
+class ProwlerThreatScoreGCPModel(BaseModel):
+    """
+    ProwlerThreatScoreGCPModel generates a finding's output in GCP Prowler ThreatScore Compliance format.
+    """
+
+    Provider: str
+    Description: str
+    ProjectId: str
+    Location: str
+    AssessmentDate: str
+    Requirements_Id: str
+    Requirements_Description: str
+    Requirements_Attributes_Title: str
+    Requirements_Attributes_Section: str
+    Requirements_Attributes_SubSection: Optional[str]
+    Requirements_Attributes_AttributeDescription: str
+    Requirements_Attributes_AdditionalInformation: str
+    Requirements_Attributes_LevelOfRisk: int
+    Status: str
+    StatusExtended: str
+    ResourceId: str
+    ResourceName: str
+    CheckId: str
+    Muted: bool

@@ -0,0 +1,121 @@
+from colorama import Fore, Style
+from tabulate import tabulate
+
+from prowler.config.config import orange_color
+
+
+def get_prowler_threatscore_table(
+    findings: list,
+    bulk_checks_metadata: dict,
+    compliance_framework: str,
+    output_filename: str,
+    output_directory: str,
+    compliance_overview: bool,
+):
+    pillar_table = {
+        "Provider": [],
+        "Pillar": [],
+        "Status": [],
+        "Score": [],
+        "Muted": [],
+    }
+    pass_count = []
+    fail_count = []
+    muted_count = []
+    pillars = {}
+    score_per_pillar = {}
+    number_findings_per_pillar = {}
+    for index, finding in enumerate(findings):
+        check = bulk_checks_metadata[finding.check_metadata.CheckID]
+        check_compliances = check.Compliance
+        for compliance in check_compliances:
+            if compliance.Framework == "ProwlerThreatScore":
+                for requirement in compliance.Requirements:
+                    for attribute in requirement.Attributes:
+                        pillar = attribute.Section
+
+                        if pillar not in score_per_pillar.keys():
+                            score_per_pillar[pillar] = 0
+                            number_findings_per_pillar[pillar] = 0
+                        if finding.status == "FAIL" and not finding.muted:
+                            score_per_pillar[pillar] += attribute.LevelOfRisk
+                            number_findings_per_pillar[pillar] += 1
+
+                        if pillar not in pillars:
+                            pillars[pillar] = {"FAIL": 0, "PASS": 0, "Muted": 0}
+
+                        if finding.muted:
+                            if index not in muted_count:
+                                muted_count.append(index)
+                            pillars[pillar]["Muted"] += 1
+                        else:
+                            if finding.status == "FAIL" and index not in fail_count:
+                                fail_count.append(index)
+                                pillars[pillar]["FAIL"] += 1
+                            elif finding.status == "PASS" and index not in pass_count:
+                                pass_count.append(index)
+                                pillars[pillar]["PASS"] += 1
+
+    pillars = dict(sorted(pillars.items()))
+    for pillar in pillars:
+        pillar_table["Provider"].append(compliance.Provider)
+        pillar_table["Pillar"].append(pillar)
+        if number_findings_per_pillar[pillar] == 0:
+            pillar_table["Score"].append(
+                f"{Style.BRIGHT}{Fore.GREEN}0{Style.RESET_ALL}"
+            )
+        else:
+            pillar_table["Score"].append(
+                f"{Style.BRIGHT}{Fore.RED}{score_per_pillar[pillar] / number_findings_per_pillar[pillar]:.2f}/5{Style.RESET_ALL}"
+            )
+        if pillars[pillar]["FAIL"] > 0:
+            pillar_table["Status"].append(
+                f"{Fore.RED}FAIL({pillars[pillar]['FAIL']}){Style.RESET_ALL}"
+            )
+        else:
+            pillar_table["Status"].append(
+                f"{Fore.GREEN}PASS({pillars[pillar]['PASS']}){Style.RESET_ALL}"
+            )
+        pillar_table["Muted"].append(
+            f"{orange_color}{pillars[pillar]['Muted']}{Style.RESET_ALL}"
+        )
+
+    if (
+        len(fail_count) + len(pass_count) + len(muted_count) > 1
+    ):  # If there are no resources, don't print the compliance table
+        print(
+            f"\nCompliance Status of {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Framework:"
+        )
+        total_findings_count = len(fail_count) + len(pass_count) + len(muted_count)
+        overview_table = [
+            [
+                f"{Fore.RED}{round(len(fail_count) / total_findings_count * 100, 2)}% ({len(fail_count)}) FAIL{Style.RESET_ALL}",
+                f"{Fore.GREEN}{round(len(pass_count) / total_findings_count * 100, 2)}% ({len(pass_count)}) PASS{Style.RESET_ALL}",
+                f"{orange_color}{round(len(muted_count) / total_findings_count * 100, 2)}% ({len(muted_count)}) MUTED{Style.RESET_ALL}",
+            ]
+        ]
+        print(tabulate(overview_table, tablefmt="rounded_grid"))
+        if not compliance_overview:
+            if len(fail_count) > 0 and len(pillar_table["Pillar"]) > 0:
+                print(
+                    f"\nFramework {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Results:"
+                )
+
+                print(
+                    tabulate(
+                        pillar_table,
+                        tablefmt="rounded_grid",
+                        headers="keys",
+                    )
+                )
+
+                print(
+                    f"{Style.BRIGHT}\n=== Risk Score Guide ===\nScore ranges from 1 (lowest risk) to 5 (highest risk), indicating the severity of the potential impact.{Style.RESET_ALL}"
+                )
+                print(
+                    f"{Style.BRIGHT}(Only sections containing results appear, the score is calculated as the sum of the level of risk of the failed findings divided by the number of failed findings){Style.RESET_ALL}"
+                )
+            print(f"\nDetailed results of {compliance_framework.upper()} are in:")
+            print(
+                f" - CSV: {output_directory}/compliance/{output_filename}_{compliance_framework}.csv\n"
+            )

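The per-pillar score printed above is the mean LevelOfRisk of failed, unmuted findings in that pillar; a minimal standalone sketch of the same arithmetic (hypothetical findings, not Prowler's Finding objects):

# Score = sum(LevelOfRisk of failed, unmuted findings) / number of such findings.
findings = [
    {"pillar": "4. Encryption", "status": "FAIL", "muted": False, "level_of_risk": 5},
    {"pillar": "4. Encryption", "status": "FAIL", "muted": False, "level_of_risk": 2},
    {"pillar": "4. Encryption", "status": "PASS", "muted": False, "level_of_risk": 3},
]
failed = [f for f in findings if f["status"] == "FAIL" and not f["muted"]]
score = sum(f["level_of_risk"] for f in failed) / len(failed) if failed else 0
print(f"{score:.2f}/5")  # -> 3.50/5
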
@@ -0,0 +1,91 @@
+from prowler.lib.check.compliance_models import Compliance
+from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
+from prowler.lib.outputs.compliance.prowler_threatscore.models import (
+    ProwlerThreatScoreAWSModel,
+)
+from prowler.lib.outputs.finding import Finding
+
+
+class ProwlerThreatScoreAWS(ComplianceOutput):
+    """
+    This class represents the AWS Prowler ThreatScore compliance output.
+
+    Attributes:
+        - _data (list): A list to store transformed data from findings.
+        - _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.
+
+    Methods:
+        - transform: Transforms findings into AWS Prowler ThreatScore compliance format.
+    """
+
+    def transform(
+        self,
+        findings: list[Finding],
+        compliance: Compliance,
+        compliance_name: str,
+    ) -> None:
+        """
+        Transforms a list of findings into AWS Prowler ThreatScore compliance format.
+
+        Parameters:
+            - findings (list): A list of findings.
+            - compliance (Compliance): A compliance model.
+            - compliance_name (str): The name of the compliance model.
+
+        Returns:
+            - None
+        """
+        for finding in findings:
+            # Get the compliance requirements for the finding
+            finding_requirements = finding.compliance.get(compliance_name, [])
+            for requirement in compliance.Requirements:
+                if requirement.Id in finding_requirements:
+                    for attribute in requirement.Attributes:
+                        compliance_row = ProwlerThreatScoreAWSModel(
+                            Provider=finding.provider,
+                            Description=compliance.Description,
+                            AccountId=finding.account_uid,
+                            Region=finding.region,
+                            AssessmentDate=str(finding.timestamp),
+                            Requirements_Id=requirement.Id,
+                            Requirements_Description=requirement.Description,
+                            Requirements_Attributes_Title=attribute.Title,
+                            Requirements_Attributes_Section=attribute.Section,
+                            Requirements_Attributes_SubSection=attribute.SubSection,
+                            Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
+                            Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
+                            Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
+                            Status=finding.status,
+                            StatusExtended=finding.status_extended,
+                            ResourceId=finding.resource_uid,
+                            ResourceName=finding.resource_name,
+                            CheckId=finding.check_id,
+                            Muted=finding.muted,
+                        )
+                        self._data.append(compliance_row)
+        # Add manual requirements to the compliance output
+        for requirement in compliance.Requirements:
+            if not requirement.Checks:
+                for attribute in requirement.Attributes:
+                    compliance_row = ProwlerThreatScoreAWSModel(
+                        Provider=compliance.Provider.lower(),
+                        Description=compliance.Description,
+                        AccountId="",
+                        Region="",
+                        AssessmentDate=str(finding.timestamp),
+                        Requirements_Id=requirement.Id,
+                        Requirements_Description=requirement.Description,
+                        Requirements_Attributes_Title=attribute.Title,
+                        Requirements_Attributes_Section=attribute.Section,
+                        Requirements_Attributes_SubSection=attribute.SubSection,
+                        Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
+                        Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
+                        Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
+                        Status="MANUAL",
+                        StatusExtended="Manual check",
+                        ResourceId="manual_check",
+                        ResourceName="Manual check",
+                        CheckId="manual",
+                        Muted=False,
+                    )
+                    self._data.append(compliance_row)

@@ -0,0 +1,91 @@
+from prowler.lib.check.compliance_models import Compliance
+from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
+from prowler.lib.outputs.compliance.prowler_threatscore.models import (
+    ProwlerThreatScoreAzureModel,
+)
+from prowler.lib.outputs.finding import Finding
+
+
+class ProwlerThreatScoreAzure(ComplianceOutput):
+    """
+    This class represents the Azure Prowler ThreatScore compliance output.
+
+    Attributes:
+        - _data (list): A list to store transformed data from findings.
+        - _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.
+
+    Methods:
+        - transform: Transforms findings into Azure Prowler ThreatScore compliance format.
+    """
+
+    def transform(
+        self,
+        findings: list[Finding],
+        compliance: Compliance,
+        compliance_name: str,
+    ) -> None:
+        """
+        Transforms a list of findings into Azure Prowler ThreatScore compliance format.
+
+        Parameters:
+            - findings (list): A list of findings.
+            - compliance (Compliance): A compliance model.
+            - compliance_name (str): The name of the compliance model.
+
+        Returns:
+            - None
+        """
+        for finding in findings:
+            # Get the compliance requirements for the finding
+            finding_requirements = finding.compliance.get(compliance_name, [])
+            for requirement in compliance.Requirements:
+                if requirement.Id in finding_requirements:
+                    for attribute in requirement.Attributes:
+                        compliance_row = ProwlerThreatScoreAzureModel(
+                            Provider=finding.provider,
+                            Description=compliance.Description,
+                            SubscriptionId=finding.account_uid,
+                            Location=finding.region,
+                            AssessmentDate=str(finding.timestamp),
+                            Requirements_Id=requirement.Id,
+                            Requirements_Description=requirement.Description,
+                            Requirements_Attributes_Title=attribute.Title,
+                            Requirements_Attributes_Section=attribute.Section,
+                            Requirements_Attributes_SubSection=attribute.SubSection,
+                            Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
+                            Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
+                            Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
+                            Status=finding.status,
+                            StatusExtended=finding.status_extended,
+                            ResourceId=finding.resource_uid,
+                            ResourceName=finding.resource_name,
+                            CheckId=finding.check_id,
+                            Muted=finding.muted,
+                        )
+                        self._data.append(compliance_row)
+        # Add manual requirements to the compliance output
+        for requirement in compliance.Requirements:
+            if not requirement.Checks:
+                for attribute in requirement.Attributes:
+                    compliance_row = ProwlerThreatScoreAzureModel(
+                        Provider=compliance.Provider.lower(),
+                        Description=compliance.Description,
+                        SubscriptionId="",
+                        Location="",
+                        AssessmentDate=str(finding.timestamp),
+                        Requirements_Id=requirement.Id,
+                        Requirements_Description=requirement.Description,
+                        Requirements_Attributes_Title=attribute.Title,
+                        Requirements_Attributes_Section=attribute.Section,
+                        Requirements_Attributes_SubSection=attribute.SubSection,
+                        Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
+                        Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
+                        Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
+                        Status="MANUAL",
+                        StatusExtended="Manual check",
+                        ResourceId="manual_check",
+                        ResourceName="Manual check",
+                        CheckId="manual",
+                        Muted=False,
+                    )
+                    self._data.append(compliance_row)

@@ -0,0 +1,91 @@
+from prowler.lib.check.compliance_models import Compliance
+from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
+from prowler.lib.outputs.compliance.prowler_threatscore.models import (
+    ProwlerThreatScoreGCPModel,
+)
+from prowler.lib.outputs.finding import Finding
+
+
+class ProwlerThreatScoreGCP(ComplianceOutput):
+    """
+    This class represents the GCP Prowler ThreatScore compliance output.
+
+    Attributes:
+        - _data (list): A list to store transformed data from findings.
+        - _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.
+
+    Methods:
+        - transform: Transforms findings into GCP Prowler ThreatScore compliance format.
+    """
+
+    def transform(
+        self,
+        findings: list[Finding],
+        compliance: Compliance,
+        compliance_name: str,
+    ) -> None:
+        """
+        Transforms a list of findings into GCP Prowler ThreatScore compliance format.
+
+        Parameters:
+            - findings (list): A list of findings.
+            - compliance (Compliance): A compliance model.
+            - compliance_name (str): The name of the compliance model.
+
+        Returns:
+            - None
+        """
+        for finding in findings:
+            # Get the compliance requirements for the finding
+            finding_requirements = finding.compliance.get(compliance_name, [])
+            for requirement in compliance.Requirements:
+                if requirement.Id in finding_requirements:
+                    for attribute in requirement.Attributes:
+                        compliance_row = ProwlerThreatScoreGCPModel(
+                            Provider=finding.provider,
+                            Description=compliance.Description,
+                            ProjectId=finding.account_uid,
+                            Location=finding.region,
+                            AssessmentDate=str(finding.timestamp),
+                            Requirements_Id=requirement.Id,
+                            Requirements_Description=requirement.Description,
+                            Requirements_Attributes_Title=attribute.Title,
+                            Requirements_Attributes_Section=attribute.Section,
+                            Requirements_Attributes_SubSection=attribute.SubSection,
+                            Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
+                            Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
+                            Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
+                            Status=finding.status,
+                            StatusExtended=finding.status_extended,
+                            ResourceId=finding.resource_uid,
+                            ResourceName=finding.resource_name,
+                            CheckId=finding.check_id,
+                            Muted=finding.muted,
+                        )
+                        self._data.append(compliance_row)
+        # Add manual requirements to the compliance output
+        for requirement in compliance.Requirements:
+            if not requirement.Checks:
+                for attribute in requirement.Attributes:
+                    compliance_row = ProwlerThreatScoreGCPModel(
+                        Provider=compliance.Provider.lower(),
+                        Description=compliance.Description,
+                        ProjectId="",
+                        Location="",
+                        AssessmentDate=str(finding.timestamp),
+                        Requirements_Id=requirement.Id,
+                        Requirements_Description=requirement.Description,
+                        Requirements_Attributes_Title=attribute.Title,
+                        Requirements_Attributes_Section=attribute.Section,
+                        Requirements_Attributes_SubSection=attribute.SubSection,
+                        Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
+                        Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
+                        Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
+                        Status="MANUAL",
+                        StatusExtended="Manual check",
+                        ResourceId="manual_check",
+                        ResourceName="Manual check",
+                        CheckId="manual",
+                        Muted=False,
+                    )
+                    self._data.append(compliance_row)

@@ -106,8 +106,7 @@ class HTML(Output):
         """
         try:
             file_descriptor.write(
-                f"""
-<!DOCTYPE html>
+                f"""<!DOCTYPE html>
 <html lang="en">
 <head>
     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />

@@ -4,6 +4,9 @@ import queue
 import re
 import subprocess
 import threading
+from typing import Union
+
+from prowler.lib.logger import logger


 class PowerShellSession:

@@ -100,7 +103,9 @@ class PowerShellSession:
         ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
         return ansi_escape.sub("", text)

-    def execute(self, command: str) -> dict:
+    def execute(
+        self, command: str, json_parse: bool = False, timeout: int = 10
+    ) -> Union[str, dict]:
         """
         Send a command to PowerShell and retrieve its output.

@@ -120,31 +125,41 @@ class PowerShellSession:
         """
         self.process.stdin.write(f"{command}\n")
         self.process.stdin.write(f"Write-Output '{self.END}'\n")
-        return self.json_parse_output(self.read_output())
+        self.process.stdin.write(f"Write-Error '{self.END}'\n")
+        return (
+            self.json_parse_output(self.read_output(timeout=timeout))
+            if json_parse
+            else self.read_output(timeout=timeout)
+        )

     def read_output(self, timeout: int = 10, default: str = "") -> str:
         """
         Read output from a process with timeout functionality.

-        This method reads lines from process stdout until it encounters the END marker
-        or the stream ends. If reading takes longer than the timeout period, the method
-        returns a default value while allowing the reading to continue in the background.
+        This method reads lines from process stdout and stderr in separate threads until it encounters
+        the END marker for each stream. If reading stdout takes longer than the timeout period,
+        the method returns a default value while allowing the reading to continue in the background.
+
+        Any errors from stderr are logged but do not affect the return value.

         Args:
-            timeout (int, optional): Maximum time in seconds to wait for output.
+            timeout (int, optional): Maximum time in seconds to wait for stdout output.
                 Defaults to 10.
-            default (str, optional): Value to return if timeout occurs.
+            default (str, optional): Value to return if stdout timeout occurs.
                 Defaults to empty string.

         Returns:
-            str: Concatenated output lines or default value if timeout occurs.
+            str: The stdout output if available, otherwise the default value.
+            Errors from stderr are logged but not returned.

         Note:
-            This method uses a daemon thread to read the output asynchronously,
+            This method uses daemon threads to read stdout and stderr asynchronously,
             ensuring that the main thread is not blocked.
         """
         output_lines = []
         result_queue = queue.Queue()
+        error_lines = []
+        error_queue = queue.Queue()

         def reader_thread():
             try:

@@ -154,17 +169,39 @@ class PowerShellSession:
                         break
                     output_lines.append(line)
                 result_queue.put("\n".join(output_lines))
-            except Exception as e:
-                result_queue.put(str(e))
+            except Exception as error:
+                result_queue.put(str(error))
+
+        def error_reader_thread():
+            try:
+                while True:
+                    line = self.remove_ansi(self.process.stderr.readline().strip())
+                    if line == f"Write-Error: {self.END}":
+                        break
+                    error_lines.append(line)
+                error_queue.put("\n".join(error_lines))
+            except Exception as error:
+                error_queue.put(str(error))

         thread = threading.Thread(target=reader_thread)
         thread.daemon = True
         thread.start()

+        error_thread = threading.Thread(target=error_reader_thread)
+        error_thread.daemon = True
+        error_thread.start()
+
+        error_result = None
         try:
-            return result_queue.get(timeout=timeout)
+            result = result_queue.get(timeout=timeout) or default
+            error_result = error_queue.get(timeout=1)
         except queue.Empty:
-            return default
+            result = default
+
+        if error_result:
+            logger.error(f"PowerShell error output: {error_result}")
+
+        return result

     def json_parse_output(self, output: str) -> dict:
         """

@@ -179,13 +216,29 @@ class PowerShellSession:
         Returns:
             dict: Parsed JSON object if found, otherwise an empty dictionary.

-        Raises:
-            JSONDecodeError: If the JSON parsing fails.
-
         Example:
            >>> json_parse_output('Some text {"key": "value"} more text')
            {"key": "value"}
        """
+        if output == "":
+            return {}
+
         json_match = re.search(r"(\[.*\]|\{.*\})", output, re.DOTALL)
-        if json_match:
-            return json.loads(json_match.group(1))
+        if not json_match:
+            logger.error(
+                f"Unexpected PowerShell output: {output}\n",
+            )
+        else:
+            try:
+                return json.loads(json_match.group(1))
+            except json.JSONDecodeError as error:
+                logger.error(
+                    f"Error parsing PowerShell output as JSON: {str(error)}\n",
+                )

         return {}

     def close(self) -> None:

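A standalone sketch of the brace-matching extraction that json_parse_output performs above (re and json only; the logger call is replaced by a print for illustration):

import json
import re


def parse_json_fragment(output: str) -> dict:
    # Grab the first {...} or [...] span, including newlines, then parse it.
    match = re.search(r"(\[.*\]|\{.*\})", output, re.DOTALL)
    if not match:
        print(f"Unexpected PowerShell output: {output}")
        return {}
    try:
        return json.loads(match.group(1))
    except json.JSONDecodeError as error:
        print(f"Error parsing output as JSON: {error}")
        return {}


print(parse_json_fragment('Some text {"key": "value"} more text'))  # {'key': 'value'}
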
@@ -4186,12 +4186,10 @@
  "entityresolution": {
    "regions": {
      "aws": [
        "af-south-1",
        "ap-northeast-1",
        "ap-northeast-2",
        "ap-southeast-1",
        "ap-southeast-2",
        "ca-central-1",
        "eu-central-1",
        "eu-west-1",
        "eu-west-2",

@@ -7841,17 +7839,6 @@
       "aws-us-gov": []
     }
   },
-  "opsworkscm": {
-    "regions": {
-      "aws": [
-        "ap-southeast-2",
-        "eu-west-1",
-        "us-east-1"
-      ],
-      "aws-cn": [],
-      "aws-us-gov": []
-    }
-  },
   "opsworkspuppetenterprise": {
     "regions": {
       "aws": [

@@ -8051,6 +8038,44 @@
       "aws-us-gov": []
     }
   },
+  "pca-connector-scep": {
+    "regions": {
+      "aws": [
+        "af-south-1",
+        "ap-east-1",
+        "ap-northeast-1",
+        "ap-northeast-2",
+        "ap-northeast-3",
+        "ap-south-1",
+        "ap-south-2",
+        "ap-southeast-1",
+        "ap-southeast-2",
+        "ap-southeast-3",
+        "ap-southeast-4",
+        "ap-southeast-5",
+        "ca-central-1",
+        "ca-west-1",
+        "eu-central-1",
+        "eu-central-2",
+        "eu-north-1",
+        "eu-south-1",
+        "eu-south-2",
+        "eu-west-1",
+        "eu-west-2",
+        "eu-west-3",
+        "il-central-1",
+        "me-central-1",
+        "me-south-1",
+        "sa-east-1",
+        "us-east-1",
+        "us-east-2",
+        "us-west-1",
+        "us-west-2"
+      ],
+      "aws-cn": [],
+      "aws-us-gov": []
+    }
+  },
   "pcs": {
     "regions": {
       "aws": [

@@ -10434,11 +10459,19 @@
  "socialmessaging": {
    "regions": {
      "aws": [
        "af-south-1",
        "ap-northeast-1",
        "ap-northeast-2",
        "ap-south-1",
        "ap-south-2",
        "ap-southeast-1",
        "ap-southeast-2",
        "ca-central-1",
        "eu-central-1",
        "eu-south-2",
        "eu-west-1",
        "eu-west-2",
        "sa-east-1",
        "us-east-1",
        "us-east-2",
        "us-west-2"

@@ -224,7 +224,10 @@ def validate_arguments(arguments: Namespace) -> tuple[bool, str]:

 def validate_bucket(bucket_name: str) -> str:
     """validate_bucket validates that the input bucket_name is valid"""
-    if search("(?!(^xn--|.+-s3alias$))^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$", bucket_name):
+    if search(
+        "^(?!^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$)(?!.*\.{2})(?!.*\.-)(?!.*-\.)(?!^xn--)(?!^sthree-)(?!^amzn-s3-demo-)(?!.*--table-s3$)[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$",
+        bucket_name,
+    ):
         return bucket_name
     else:
         raise ArgumentTypeError(

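A quick standalone check of the new bucket-name pattern (the regex is copied from the hunk above; the test names are illustrative):

from re import search

S3_BUCKET_NAME_REGEX = (
    r"^(?!^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$)(?!.*\.{2})(?!.*\.-)(?!.*-\.)"
    r"(?!^xn--)(?!^sthree-)(?!^amzn-s3-demo-)(?!.*--table-s3$)"
    r"[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$"
)

# Dotted names are now accepted, while IP-address-like and reserved-prefix
# names are rejected by the negative lookaheads.
for name in ["my.logs.bucket", "192.168.0.1", "xn--bucket", "valid-bucket-name"]:
    print(name, bool(search(S3_BUCKET_NAME_REGEX, name)))
# my.logs.bucket True / 192.168.0.1 False / xn--bucket False / valid-bucket-name True
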
@@ -105,6 +105,12 @@ class S3:
         """
         try:
             uploaded_objects = {"success": {}, "failure": {}}
+            extension_to_content_type = {
+                ".html": "text/html",
+                ".csv": "text/csv",
+                ".ocsf.json": "application/json",
+                ".asff.json": "application/json",
+            }
             # Keys are regular and/or compliance
             for key, output_list in outputs.items():
                 for output in output_list:

@@ -115,6 +121,7 @@ class S3:

                     bucket_directory = self.get_object_path(self._output_directory)
                     basename = path.basename(output.file_descriptor.name)
+                    file_extension = output.file_extension

                     if key == "compliance":
                         object_name = f"{bucket_directory}/{key}/{basename}"

@@ -128,7 +135,12 @@ class S3:
                     # into the local filesystem because S3 upload file is the recommended way.
                     # https://aws.amazon.com/blogs/developer/uploading-files-to-amazon-s3/
                     self._session.upload_file(
-                        output.file_descriptor.name, self._bucket_name, object_name
+                        Filename=output.file_descriptor.name,
+                        Bucket=self._bucket_name,
+                        Key=object_name,
+                        ExtraArgs={
+                            "ContentType": extension_to_content_type[file_extension]
+                        },
                     )

                     if output.file_extension in uploaded_objects["success"]:

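The upload now sets a Content-Type per file extension; a minimal boto3 sketch of the same call (bucket and file names are placeholders):

import boto3

extension_to_content_type = {
    ".html": "text/html",
    ".csv": "text/csv",
    ".ocsf.json": "application/json",
    ".asff.json": "application/json",
}

s3 = boto3.client("s3")
file_extension = ".csv"
# ExtraArgs sets the object's Content-Type so browsers render it correctly
# instead of forcing a download.
s3.upload_file(
    Filename="output/prowler-output.csv",  # placeholder path
    Bucket="my-prowler-bucket",            # placeholder bucket
    Key="compliance/prowler-output.csv",
    ExtraArgs={"ContentType": extension_to_content_type[file_extension]},
)
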
@@ -14,7 +14,7 @@ class acm_certificates_with_secure_key_algorithms(Check):
                 report.status = "PASS"
                 report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} uses a secure key algorithm ({certificate.key_algorithm})."
                 if certificate.key_algorithm in acm_client.audit_config.get(
-                    "insecure_key_algorithms", ["RSA-1024", "P-192", "SHA-1"]
+                    "insecure_key_algorithms", ["RSA-1024", "P-192"]
                 ):
                     report.status = "FAIL"
                     report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} does not use a secure key algorithm ({certificate.key_algorithm})."

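The check reads its algorithm list from audit_config with an in-code fallback, so the config.yaml change above and this default stay in sync; a sketch of that lookup pattern (a plain dict stands in for the loaded config):

# audit_config is the parsed aws section of config.yaml; when the key is
# absent, the check falls back to its built-in default list.
audit_config = {"insecure_key_algorithms": ["RSA-1024", "P-192"]}

insecure = audit_config.get("insecure_key_algorithms", ["RSA-1024", "P-192"])
key_algorithm = "RSA-2048"
status = "FAIL" if key_algorithm in insecure else "PASS"
print(status)  # PASS: RSA-2048 is not in the insecure list
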
@@ -29,10 +29,15 @@ class cloudtrail_multi_region_enabled(Check):
                        break
            # If there are no trails logging it is needed to store the FAIL once all the trails have been checked
            if not trail_is_logging:
                report = Check_Report_AWS(
                    metadata=self.metadata(),
                    resource={},
                )
                report.status = "FAIL"
                report.status_extended = (
                    "No CloudTrail trails enabled with logging were found."
                )
                report.region = region
                report.resource_arn = cloudtrail_client._get_trail_arn_template(
                    region
                )

@@ -21,7 +21,7 @@
         "Terraform": ""
     },
     "Recommendation": {
-        "Text": "Modify the security group to remove the rule that allows ingress from the internet to TCP port 389 or 636 (LDAP).",
+        "Text": "Modify the security group to remove the rule that allows ingress from the internet to TCP port 139 or 445 (CIFS).",
         "Url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html"
     }
 },

@@ -880,9 +880,14 @@ class IAM(AWSService):
                     SAMLProviderArn=resource.arn
                 ).get("Tags", [])
             except Exception as error:
-                logger.error(
-                    f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                )
+                if error.response["Error"]["Code"] == "NoSuchEntityException":
+                    logger.warning(
+                        f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
+                else:
+                    logger.error(
+                        f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )

     def _get_last_accessed_services(self):
         logger.info("IAM - Getting Last Accessed Services ...")

@@ -161,7 +161,7 @@ class Defender(AzureService):
                     {
                         security_contact_default.name: SecurityContacts(
                             resource_id=security_contact_default.id,
-                            name=security_contact_default.get("name", "default"),
+                            name=getattr(security_contact_default, "name", "default"),
                             emails=security_contact_default.emails,
                             phone=security_contact_default.phone,
                             alert_notifications_minimal_severity=security_contact_default.alert_notifications.minimal_severity,

@@ -78,15 +78,18 @@ class Network(AzureService):
                     FlowLog(
                         id=flow_log.id,
                         name=flow_log.name,
-                        enabled=getattr(
-                            getattr(flow_log, "properties", None),
-                            "enabled",
-                            False,
-                        ),
-                        retention_policy=getattr(
-                            getattr(flow_log, "properties", None),
-                            "retentionPolicy",
-                            None,
+                        enabled=flow_log.enabled,
+                        retention_policy=RetentionPolicy(
+                            enabled=(
+                                flow_log.retention_policy.enabled
+                                if flow_log.retention_policy
+                                else False
+                            ),
+                            days=(
+                                flow_log.retention_policy.days
+                                if flow_log.retention_policy
+                                else 0
+                            ),
                         ),
                     )
                     for flow_log in flow_logs

@@ -221,6 +221,7 @@ class Provider(ABC):
                 az_cli_auth=arguments.az_cli_auth,
                 browser_auth=arguments.browser_auth,
                 tenant_id=arguments.tenant_id,
+                init_modules=arguments.init_modules,
                 fixer_config=fixer_config,
             )
         elif "nhn" in provider_class_name.lower():

@@ -30,18 +30,18 @@ class CloudSQL(GCPService):
                             region=instance["region"],
                             ip_addresses=instance.get("ipAddresses", []),
                             public_ip=public_ip,
-                            require_ssl=instance["settings"]["ipConfiguration"].get(
-                                "requireSsl", False
-                            ),
-                            ssl_mode=instance["settings"]["ipConfiguration"].get(
-                                "sslMode", "ALLOW_UNENCRYPTED_AND_ENCRYPTED"
-                            ),
+                            require_ssl=instance["settings"]
+                            .get("ipConfiguration", {})
+                            .get("requireSsl", False),
+                            ssl_mode=instance["settings"]
+                            .get("ipConfiguration", {})
+                            .get("sslMode", "ALLOW_UNENCRYPTED_AND_ENCRYPTED"),
                             automated_backups=instance["settings"][
                                 "backupConfiguration"
                             ]["enabled"],
-                            authorized_networks=instance["settings"][
-                                "ipConfiguration"
-                            ]["authorizedNetworks"],
+                            authorized_networks=instance["settings"]
+                            .get("ipConfiguration", {})
+                            .get("authorizedNetworks", []),
                             flags=instance["settings"].get("databaseFlags", []),
                             project_id=project_id,
                         )

@@ -94,7 +94,7 @@ class M365BaseException(ProwlerException):
             "message": "Tenant Id is required for Microsoft 365 static credentials. Make sure you are using the correct credentials.",
             "remediation": "Check the Microsoft 365 Tenant ID and ensure it is properly set up.",
         },
-        (6022, "M365MissingEnvironmentUserCredentialsError"): {
+        (6022, "M365MissingEnvironmentCredentialsError"): {
             "message": "User and Password environment variables are needed to use Credentials authentication method.",
             "remediation": "Ensure your environment variables are properly set up.",
         },

@@ -102,6 +102,18 @@ class M365BaseException(ProwlerException):
             "message": "User or Password environment variables are not correct.",
             "remediation": "Ensure you are using the right credentials.",
         },
+        (6024, "M365NotValidUserError"): {
+            "message": "The provided M365 User is not valid.",
+            "remediation": "Check the M365 User and ensure it is a valid user.",
+        },
+        (6025, "M365NotValidEncryptedPasswordError"): {
+            "message": "The provided M365 Encrypted Password is not valid.",
+            "remediation": "Check the M365 Encrypted Password and ensure it is a valid password.",
+        },
+        (6026, "M365UserNotBelongingToTenantError"): {
+            "message": "The provided M365 User does not belong to the specified tenant.",
+            "remediation": "Check the M365 User email domain and ensure it belongs to the specified tenant.",
+        },
     }

     def __init__(self, code, file=None, original_exception=None, message=None):

@@ -279,7 +291,7 @@ class M365NotTenantIdButClientIdAndClientSecretError(M365CredentialsError):
         )


-class M365MissingEnvironmentUserCredentialsError(M365CredentialsError):
+class M365MissingEnvironmentCredentialsError(M365CredentialsError):
     def __init__(self, file=None, original_exception=None, message=None):
         super().__init__(
             6022, file=file, original_exception=original_exception, message=message

@@ -291,3 +303,24 @@ class M365EnvironmentUserCredentialsError(M365CredentialsError):
         super().__init__(
             6023, file=file, original_exception=original_exception, message=message
         )
+
+
+class M365NotValidUserError(M365CredentialsError):
+    def __init__(self, file=None, original_exception=None, message=None):
+        super().__init__(
+            6024, file=file, original_exception=original_exception, message=message
+        )
+
+
+class M365NotValidEncryptedPasswordError(M365CredentialsError):
+    def __init__(self, file=None, original_exception=None, message=None):
+        super().__init__(
+            6025, file=file, original_exception=original_exception, message=message
+        )
+
+
+class M365UserNotBelongingToTenantError(M365CredentialsError):
+    def __init__(self, file=None, original_exception=None, message=None):
+        super().__init__(
+            6026, file=file, original_exception=original_exception, message=message
+        )

@@ -35,16 +35,9 @@ def init_parser(self):
         help="Microsoft 365 Tenant ID to be used with --browser-auth option",
     )
     m365_parser.add_argument(
-        "--user",
-        nargs="?",
-        default=None,
-        help="Microsoft 365 user email",
-    )
-    m365_parser.add_argument(
-        "--encypted-password",
-        nargs="?",
-        default=None,
-        help="Microsoft 365 encrypted password",
+        "--init-modules",
+        action="store_true",
+        help="Initialize Microsoft 365 PowerShell modules",
     )
     # Regions
     m365_regions_subparser = m365_parser.add_argument_group("Regions")

@@ -1,6 +1,12 @@
|
||||
import os
|
||||
|
||||
import msal
|
||||
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.lib.powershell.powershell import PowerShellSession
|
||||
from prowler.providers.m365.exceptions.exceptions import (
|
||||
M365UserNotBelongingToTenantError,
|
||||
)
|
||||
from prowler.providers.m365.models import M365Credentials
|
||||
|
||||
|
||||
@@ -21,6 +27,7 @@ class M365PowerShell(PowerShellSession):
|
||||
|
||||
Attributes:
|
||||
credentials (M365Credentials): The Microsoft 365 credentials used for authentication.
|
||||
required_modules (list): List of required PowerShell modules for M365 operations.
|
||||
|
||||
Note:
|
||||
This class requires the Microsoft Teams and Exchange Online PowerShell modules
|
||||
@@ -79,16 +86,14 @@ class M365PowerShell(PowerShellSession):
|
||||
bool: True if credentials are valid and authentication succeeds, False otherwise.
|
||||
"""
|
||||
self.execute(
|
||||
f'$securePassword = "{credentials.passwd}" | ConvertTo-SecureString\n'
|
||||
f'$securePassword = "{credentials.passwd}" | ConvertTo-SecureString'
|
||||
)
|
||||
self.execute(
|
||||
f'$credential = New-Object System.Management.Automation.PSCredential("{credentials.user}", $securePassword)\n'
|
||||
)
|
||||
self.process.stdin.write(
|
||||
'Write-Output "$($credential.GetNetworkCredential().Password)"\n'
|
||||
decrypted_password = self.execute(
|
||||
'Write-Output "$($credential.GetNetworkCredential().Password)"'
|
||||
)
|
||||
self.process.stdin.write(f"Write-Output '{self.END}'\n")
|
||||
decrypted_password = self.read_output()
|
||||
|
||||
app = msal.ConfidentialClientApplication(
|
||||
client_id=credentials.client_id,
|
||||
@@ -102,7 +107,21 @@ class M365PowerShell(PowerShellSession):
|
||||
scopes=["https://graph.microsoft.com/.default"],
|
||||
)
|
||||
|
||||
return "access_token" in result
|
||||
if result is None:
|
||||
return False
|
||||
|
||||
if "access_token" not in result:
|
||||
return False
|
||||
|
||||
# Validate user credentials belong to tenant
|
||||
user_domain = credentials.user.split("@")[1]
|
||||
if not credentials.provider_id.endswith(user_domain):
|
||||
raise M365UserNotBelongingToTenantError(
|
||||
file=os.path.basename(__file__),
|
||||
message="The provided M365 User does not belong to the specified tenant.",
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
     def connect_microsoft_teams(self) -> dict:
         """

@@ -135,7 +154,48 @@ class M365PowerShell(PowerShellSession):
                 "AllowGoogleDrive": true
             }
         """
-        return self.execute("Get-CsTeamsClientConfiguration | ConvertTo-Json")
+        return self.execute(
+            "Get-CsTeamsClientConfiguration | ConvertTo-Json", json_parse=True
+        )
+
+    def get_global_meeting_policy(self) -> dict:
+        """
+        Get Teams Global Meeting Policy.
+
+        Retrieves the current Microsoft Teams global meeting policy settings.
+
+        Returns:
+            dict: Teams global meeting policy settings in JSON format.
+
+        Example:
+            >>> get_global_meeting_policy()
+            {
+                "AllowAnonymousUsersToJoinMeeting": true
+            }
+        """
+        return self.execute(
+            "Get-CsTeamsMeetingPolicy -Identity Global | ConvertTo-Json",
+            json_parse=True,
+        )
+
+    def get_user_settings(self) -> dict:
+        """
+        Get Teams User Settings.
+
+        Retrieves the current Microsoft Teams user settings.
+
+        Returns:
+            dict: Teams user settings in JSON format.
+
+        Example:
+            >>> get_user_settings()
+            {
+                "AllowExternalAccess": true
+            }
+        """
+        return self.execute(
+            "Get-CsTenantFederationConfiguration | ConvertTo-Json", json_parse=True
+        )
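The Teams getters above now pass json_parse=True, which (assuming PowerShellSession.execute parses the ConvertTo-Json output when the flag is set; the flag's implementation is not shown in this diff) hands callers a dict rather than raw JSON text. A hedged usage sketch, with field names taken from the docstring examples:

# Illustrative consumption of the parsed output; `credentials` is an
# M365Credentials instance, and connect_microsoft_teams() is assumed to have
# been called first to open the Teams session.
session = M365PowerShell(credentials)
policy = session.get_global_meeting_policy()
if policy.get("AllowAnonymousUsersToJoinMeeting"):
    print("Anonymous users may join Teams meetings")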
     def connect_exchange_online(self) -> dict:
         """

@@ -167,5 +227,324 @@ class M365PowerShell(PowerShellSession):
             }
         """
         return self.execute(
-            "Get-AdminAuditLogConfig | Select-Object UnifiedAuditLogIngestionEnabled | ConvertTo-Json"
+            "Get-AdminAuditLogConfig | Select-Object UnifiedAuditLogIngestionEnabled | ConvertTo-Json",
+            json_parse=True,
         )
+
+    def get_malware_filter_policy(self) -> dict:
+        """
+        Get Defender Malware Filter Policy.
+
+        Retrieves the current Defender anti-malware filter policy settings.
+
+        Returns:
+            dict: Malware filter policy settings in JSON format.
+
+        Example:
+            >>> get_malware_filter_policy()
+            {
+                "EnableFileFilter": true,
+                "Identity": "Default"
+            }
+        """
+        return self.execute("Get-MalwareFilterPolicy | ConvertTo-Json", json_parse=True)
+
+    def get_outbound_spam_filter_policy(self) -> dict:
+        """
+        Get Defender Outbound Spam Filter Policy.
+
+        Retrieves the current Defender outbound spam filter policy settings.
+
+        Returns:
+            dict: Outbound spam filter policy settings in JSON format.
+
+        Example:
+            >>> get_outbound_spam_filter_policy()
+            {
+                "NotifyOutboundSpam": true,
+                "BccSuspiciousOutboundMail": true,
+                "BccSuspiciousOutboundAdditionalRecipients": [],
+                "NotifyOutboundSpamRecipients": []
+            }
+        """
+        return self.execute(
+            "Get-HostedOutboundSpamFilterPolicy | ConvertTo-Json", json_parse=True
+        )
+
+    def get_outbound_spam_filter_rule(self) -> dict:
+        """
+        Get Defender Outbound Spam Filter Rule.
+
+        Retrieves the current Defender outbound spam filter rule settings.
+
+        Returns:
+            dict: Outbound spam filter rule settings in JSON format.
+
+        Example:
+            >>> get_outbound_spam_filter_rule()
+            {
+                "State": "Enabled"
+            }
+        """
+        return self.execute(
+            "Get-HostedOutboundSpamFilterRule | ConvertTo-Json", json_parse=True
+        )
+
+    def get_antiphishing_policy(self) -> dict:
+        """
+        Get Defender Antiphishing Policy.
+
+        Retrieves the current Defender anti-phishing policy settings.
+
+        Returns:
+            dict: Antiphishing policy settings in JSON format.
+
+        Example:
+            >>> get_antiphishing_policy()
+            {
+                "EnableSpoofIntelligence": true,
+                "AuthenticationFailAction": "Quarantine",
+                "DmarcRejectAction": "Quarantine",
+                "DmarcQuarantineAction": "Quarantine",
+                "EnableFirstContactSafetyTips": true,
+                "EnableUnauthenticatedSender": true,
+                "EnableViaTag": true,
+                "HonorDmarcPolicy": true,
+                "IsDefault": false
+            }
+        """
+        return self.execute("Get-AntiPhishPolicy | ConvertTo-Json", json_parse=True)
+
+    def get_antiphishing_rules(self) -> dict:
+        """
+        Get Defender Antiphishing Rules.
+
+        Retrieves the current Defender anti-phishing rules.
+
+        Returns:
+            dict: Antiphishing rules in JSON format.
+
+        Example:
+            >>> get_antiphishing_rules()
+            {
+                "Name": "Rule1",
+                "State": "Enabled"
+            }
+        """
+        return self.execute("Get-AntiPhishRule | ConvertTo-Json", json_parse=True)
+
+    def get_organization_config(self) -> dict:
+        """
+        Get Exchange Online Organization Configuration.
+
+        Retrieves the current Exchange Online organization configuration settings.
+
+        Returns:
+            dict: Organization configuration settings in JSON format.
+
+        Example:
+            >>> get_organization_config()
+            {
+                "Name": "MyOrganization",
+                "Guid": "12345678-1234-1234-1234-123456789012",
+                "AuditDisabled": false
+            }
+        """
+        return self.execute("Get-OrganizationConfig | ConvertTo-Json", json_parse=True)
+
+    def get_mailbox_audit_config(self) -> dict:
+        """
+        Get Exchange Online Mailbox Audit Configuration.
+
+        Retrieves the current mailbox audit configuration settings for Exchange Online.
+
+        Returns:
+            dict: Mailbox audit configuration settings in JSON format.
+
+        Example:
+            >>> get_mailbox_audit_config()
+            {
+                "Name": "MyMailbox",
+                "Id": "12345678-1234-1234-1234-123456789012",
+                "AuditBypassEnabled": false
+            }
+        """
+        return self.execute(
+            "Get-MailboxAuditBypassAssociation | ConvertTo-Json", json_parse=True
+        )
+
+    def get_mailbox_policy(self) -> dict:
+        """
+        Get Mailbox Policy.
+
+        Retrieves the current mailbox policy settings for Exchange Online.
+
+        Returns:
+            dict: Mailbox policy settings in JSON format.
+
+        Example:
+            >>> get_mailbox_policy()
+            {
+                "Id": "OwaMailboxPolicy-Default",
+                "AdditionalStorageProvidersAvailable": true
+            }
+        """
+        return self.execute("Get-OwaMailboxPolicy | ConvertTo-Json", json_parse=True)
+
+    def get_external_mail_config(self) -> dict:
+        """
+        Get Exchange Online External Mail Configuration.
+
+        Retrieves the current external mail configuration settings for Exchange Online.
+
+        Returns:
+            dict: External mail configuration settings in JSON format.
+
+        Example:
+            >>> get_external_mail_config()
+            {
+                "Identity": "MyExternalMail",
+                "ExternalMailTagEnabled": true
+            }
+        """
+        return self.execute("Get-ExternalInOutlook | ConvertTo-Json", json_parse=True)
+
+    def get_transport_rules(self) -> dict:
+        """
+        Get Exchange Online Transport Rules.
+
+        Retrieves the current transport rules configured in Exchange Online.
+
+        Returns:
+            dict: Transport rules in JSON format.
+
+        Example:
+            >>> get_transport_rules()
+            {
+                "Name": "Rule1",
+                "SetSCL": -1,
+                "SenderDomainIs": ["example.com"]
+            }
+        """
+        return self.execute("Get-TransportRule | ConvertTo-Json", json_parse=True)
+
+    def get_connection_filter_policy(self) -> dict:
+        """
+        Get Exchange Online Connection Filter Policy.
+
+        Retrieves the current connection filter policy settings for Exchange Online.
+
+        Returns:
+            dict: Connection filter policy settings in JSON format.
+
+        Example:
+            >>> get_connection_filter_policy()
+            {
+                "Identity": "Default",
+                "IPAllowList": []
+            }
+        """
+        return self.execute(
+            "Get-HostedConnectionFilterPolicy -Identity Default | ConvertTo-Json",
+            json_parse=True,
+        )
+
+    def get_dkim_config(self) -> dict:
+        """
+        Get DKIM Signing Configuration.
+
+        Retrieves the current DKIM signing configuration settings for Exchange Online.
+
+        Returns:
+            dict: DKIM signing configuration settings in JSON format.
+
+        Example:
+            >>> get_dkim_config()
+            {
+                "Id": "12345678-1234-1234-1234-123456789012",
+                "Enabled": true
+            }
+        """
+        return self.execute("Get-DkimSigningConfig | ConvertTo-Json", json_parse=True)
+
+    def get_inbound_spam_filter_policy(self) -> dict:
+        """
+        Get Inbound Spam Filter Policy.
+
+        Retrieves the current inbound spam filter policy settings for Exchange Online.
+
+        Returns:
+            dict: Inbound spam filter policy settings in JSON format.
+
+        Example:
+            >>> get_inbound_spam_filter_policy()
+            {
+                "Identity": "Default",
+                "AllowedSenderDomains": []
+            }
+        """
+        return self.execute(
+            "Get-HostedContentFilterPolicy | ConvertTo-Json", json_parse=True
+        )
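Every Exchange/Defender getter in this block follows one pattern: a single cmdlet piped through ConvertTo-Json and parsed back into Python. One caveat worth illustrating: ConvertTo-Json emits a single object for one result but a list for several, so consumers typically normalize. A hedged sketch (calling connect_exchange_online() first is an assumption about the session's prerequisites):

session = M365PowerShell(credentials)
session.connect_exchange_online()  # assumed prerequisite for the cmdlets below
dkim = session.get_dkim_config()
# Normalize: one DKIM config arrives as a dict, several as a list of dicts.
for config in dkim if isinstance(dkim, list) else [dkim]:
    print(config.get("Id"), config.get("Enabled"))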
+# This function is used to install the required M365 PowerShell modules in Docker containers
+def initialize_m365_powershell_modules():
+    """
+    Initialize required PowerShell modules.
+
+    Checks if the required PowerShell modules are installed and installs them if necessary.
+    This method ensures that all required modules for M365 operations are available.
+
+    Returns:
+        bool: True if all modules were successfully initialized, False otherwise
+    """
+    REQUIRED_MODULES = [
+        "ExchangeOnlineManagement",
+        "MicrosoftTeams",
+    ]
+
+    pwsh = PowerShellSession()
+    try:
+        for module in REQUIRED_MODULES:
+            try:
+                # Check if module is already installed
+                result = pwsh.execute(
+                    f"Get-Module -ListAvailable -Name {module}", timeout=5
+                )
+
+                # Install module if not installed
+                if not result:
+                    install_result = pwsh.execute(
+                        f'Install-Module -Name "{module}" -Force -AllowClobber -Scope CurrentUser',
+                        timeout=30,
+                    )
+                    if install_result:
+                        logger.warning(
+                            f"Unexpected output while installing module {module}: {install_result}"
+                        )
+                    else:
+                        logger.info(f"Successfully installed module {module}")
+
+                # Import module
+                pwsh.execute(f'Import-Module -Name "{module}" -Force', timeout=1)
+
+            except Exception as error:
+                logger.error(f"Failed to initialize module {module}: {str(error)}")
+                return False
+
+        return True
+    finally:
+        pwsh.close()
+
+
+def main():
+    if initialize_m365_powershell_modules():
+        logger.info("M365 PowerShell modules initialized successfully")
+    else:
+        logger.error("Failed to initialize M365 PowerShell modules")
+
+
+if __name__ == "__main__":
+    main()
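Because the module guards its entry point with __main__, the bootstrap can run once at image build time instead of on every scan. A hedged invocation sketch; the import path is taken from the provider imports later in this diff, while the build-time wiring itself is an assumption:

# e.g. a Docker build step could run:
#   python -m prowler.providers.m365.lib.powershell.m365_powershell
# or, equivalently, call the initializer directly:
from prowler.providers.m365.lib.powershell.m365_powershell import (
    initialize_m365_powershell_modules,
)

if not initialize_m365_powershell_modules():
    raise SystemExit("M365 PowerShell module bootstrap failed")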
@@ -13,5 +13,7 @@ class M365Service:
         self.audit_config = provider.audit_config
         self.fixer_config = provider.fixer_config

-        if provider.credentials:
-            self.powershell = M365PowerShell(provider.credentials)
+        # Initialize PowerShell client only if credentials are available
+        self.powershell = (
+            M365PowerShell(provider.credentials) if provider.credentials else None
+        )
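After this change self.powershell is always assigned but may be None, so PowerShell-backed services have to guard before using the session. A hedged sketch of the caller-side pattern (the surrounding service method is hypothetical):

# Inside a service that may run without PowerShell credentials:
if self.powershell is not None:
    user_settings = self.powershell.get_user_settings() or {}
else:
    user_settings = {}  # no credentials: skip PowerShell-backed collection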
@@ -1,6 +1,5 @@
 import asyncio
 import os
 import re
 from argparse import ArgumentTypeError
 from os import getenv
 from uuid import UUID

@@ -40,19 +39,24 @@ from prowler.providers.m365.exceptions.exceptions import (
     M365HTTPResponseError,
     M365InteractiveBrowserCredentialError,
     M365InvalidProviderIdError,
-    M365MissingEnvironmentUserCredentialsError,
+    M365MissingEnvironmentCredentialsError,
     M365NoAuthenticationMethodError,
     M365NotTenantIdButClientIdAndClientSecretError,
     M365NotValidClientIdError,
     M365NotValidClientSecretError,
     M365NotValidEncryptedPasswordError,
     M365NotValidTenantIdError,
     M365NotValidUserError,
     M365SetUpRegionConfigError,
     M365SetUpSessionError,
     M365TenantIdAndClientIdNotBelongingToClientSecretError,
     M365TenantIdAndClientSecretNotBelongingToClientIdError,
 )
 from prowler.providers.m365.lib.mutelist.mutelist import M365Mutelist
-from prowler.providers.m365.lib.powershell.m365_powershell import M365PowerShell
+from prowler.providers.m365.lib.powershell.m365_powershell import (
+    M365PowerShell,
+    initialize_m365_powershell_modules,
+)
 from prowler.providers.m365.lib.regions.regions import get_regions_config
 from prowler.providers.m365.models import (
     M365Credentials,
@@ -99,21 +103,22 @@ class M365Provider(Provider):
     _audit_config: dict
     _region_config: M365RegionConfig
     _mutelist: M365Mutelist
-    _credentials: M365Credentials
+    _credentials: M365Credentials = {}
     # TODO: this is not optional, enforce for all providers
     audit_metadata: Audit_Metadata

     def __init__(
         self,
-        sp_env_auth: bool,
-        env_auth: bool,
-        az_cli_auth: bool,
-        browser_auth: bool,
+        sp_env_auth: bool = False,
+        env_auth: bool = False,
+        az_cli_auth: bool = False,
+        browser_auth: bool = False,
         tenant_id: str = None,
         client_id: str = None,
         client_secret: str = None,
         user: str = None,
         encrypted_password: str = None,
+        init_modules: bool = False,
         region: str = "M365Global",
         config_content: dict = None,
         config_path: str = None,

@@ -187,9 +192,6 @@ class M365Provider(Provider):
             self._region_config,
         )

-        # Set up PowerShell session credentials
-        self._credentials = self.setup_powershell(env_auth, m365_credentials)
-
         # Set up the identity
         self._identity = self.setup_identity(
             az_cli_auth,

@@ -199,6 +201,14 @@ class M365Provider(Provider):
             client_id,
         )

+        # Set up PowerShell session credentials
+        self._credentials = self.setup_powershell(
+            env_auth=env_auth,
+            m365_credentials=m365_credentials,
+            provider_id=self.identity.tenant_domain,
+            init_modules=init_modules,
+        )
+
         # Audit Config
         if config_content:
             self._audit_config = config_content
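The reordering matters here: setup_powershell now runs after setup_identity so it can pass the resolved tenant domain as provider_id, and the new init_modules flag lets a caller opt into module installation at construction time. A hedged construction sketch with placeholder arguments:

# Illustrative only; real values come from CLI flags or environment variables.
provider = M365Provider(
    env_auth=True,       # read AZURE_* / M365_* variables for credentials
    init_modules=True,   # install the Teams/Exchange modules if missing
    region="M365Global",
)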
@@ -294,8 +304,8 @@ class M365Provider(Provider):
             M365BrowserAuthNoTenantIDError: If browser authentication is enabled but the tenant ID is not found.
         """

-        if not client_id and not client_secret and not user and not encrypted_password:
-            if not browser_auth and tenant_id:
+        if not client_id and not client_secret:
+            if not browser_auth and tenant_id and not env_auth:
                 raise M365BrowserAuthNoFlagError(
                     file=os.path.basename(__file__),
                     message="M365 tenant ID error: browser authentication flag (--browser-auth) not found",

@@ -315,6 +325,12 @@ class M365Provider(Provider):
                     file=os.path.basename(__file__),
                     message="M365 Tenant ID (--tenant-id) is required for browser authentication mode",
                 )
+        elif env_auth:
+            if not user or not encrypted_password or not tenant_id:
+                raise M365MissingEnvironmentCredentialsError(
+                    file=os.path.basename(__file__),
+                    message="M365 provider requires AZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_TENANT_ID, M365_USER and M365_ENCRYPTED_PASSWORD environment variables to be set when using --env-auth",
+                )
         else:
             if not tenant_id:
                 raise M365NotTenantIdButClientIdAndClientSecretError(

@@ -362,7 +378,10 @@ class M365Provider(Provider):

     @staticmethod
     def setup_powershell(
-        env_auth: bool = False, m365_credentials: dict = {}
+        env_auth: bool = False,
+        m365_credentials: dict = {},
+        provider_id: str = None,
+        init_modules: bool = False,
     ) -> M365Credentials:
         """Gets the M365 credentials.

@@ -382,6 +401,7 @@ class M365Provider(Provider):
                 client_id=m365_credentials.get("client_id", ""),
                 client_secret=m365_credentials.get("client_secret", ""),
                 tenant_id=m365_credentials.get("tenant_id", ""),
+                provider_id=provider_id,
             )
         elif env_auth:
             m365_user = getenv("M365_USER")

@@ -394,7 +414,7 @@ class M365Provider(Provider):
                 logger.critical(
                     "M365 provider: Missing M365_USER or M365_ENCRYPTED_PASSWORD environment variables needed for credentials authentication"
                 )
-                raise M365MissingEnvironmentUserCredentialsError(
+                raise M365MissingEnvironmentCredentialsError(
                     file=os.path.basename(__file__),
                     message="Missing M365_USER or M365_ENCRYPTED_PASSWORD environment variables required for credentials authentication.",
                 )

@@ -404,12 +424,15 @@ class M365Provider(Provider):
                 client_id=client_id,
                 client_secret=client_secret,
                 tenant_id=tenant_id,
+                provider_id=provider_id,
             )

         if credentials:
             test_session = M365PowerShell(credentials)
             try:
                 if test_session.test_credentials(credentials):
+                    if init_modules:
+                        initialize_m365_powershell_modules()
                     return credentials
                 raise M365EnvironmentUserCredentialsError(
                     file=os.path.basename(__file__),

@@ -434,8 +457,11 @@ class M365Provider(Provider):
             f"M365 Region: {Fore.YELLOW}{self.region_config.name}{Style.RESET_ALL}",
             f"M365 Tenant Domain: {Fore.YELLOW}{self._identity.tenant_domain}{Style.RESET_ALL} M365 Tenant ID: {Fore.YELLOW}{self._identity.tenant_id}{Style.RESET_ALL}",
             f"M365 Identity Type: {Fore.YELLOW}{self._identity.identity_type}{Style.RESET_ALL} M365 Identity ID: {Fore.YELLOW}{self._identity.identity_id}{Style.RESET_ALL}",
-            f"M365 User: {Fore.YELLOW}{self.credentials.user}{Style.RESET_ALL}",
         ]
+        if self.credentials and self.credentials.user:
+            report_lines.append(
+                f"M365 User: {Fore.YELLOW}{self.credentials.user}{Style.RESET_ALL}"
+            )
         report_title = (
             f"{Style.BRIGHT}Using the M365 credentials below:{Style.RESET_ALL}"
         )

@@ -466,6 +492,9 @@ class M365Provider(Provider):
                 - tenant_id: The M365 Active Directory tenant ID.
                 - client_id: The M365 client ID.
                 - client_secret: The M365 client secret
+                - user: The M365 user email
+                - encrypted_password: The M365 encrypted password
+                - provider_id: The M365 provider ID (in this case the Tenant ID).
             region_config (M365RegionConfig): The region configuration object.

         Returns:
@@ -587,15 +616,16 @@ class M365Provider(Provider):
         browser_auth: bool = False,
         tenant_id: str = None,
         region: str = "M365Global",
-        raise_on_exception=True,
-        client_id=None,
-        client_secret=None,
-        user=None,
-        encrypted_password=None,
+        raise_on_exception: bool = True,
+        client_id: str = None,
+        client_secret: str = None,
+        user: str = None,
+        encrypted_password: str = None,
+        provider_id: str = None,
     ) -> Connection:
-        """Test connection to M365 subscription.
+        """Test connection to M365 tenant and PowerShell modules.

-        Test the connection to an M365 subscription using the provided credentials.
+        Test the connection to an M365 tenant and PowerShell modules using the provided credentials.

         Args:

@@ -610,6 +640,7 @@ class M365Provider(Provider):
             client_secret (str): The M365 client secret.
             user (str): The M365 user email.
             encrypted_password (str): The M365 encrypted_password.
+            provider_id (str): The M365 provider ID (in this case the Tenant ID).

         Returns:

@@ -622,7 +653,7 @@ class M365Provider(Provider):
             M365InteractiveBrowserCredentialError: If there is an error in retrieving the M365 credentials using browser authentication.
             M365HTTPResponseError: If there is an HTTP response error.
             M365ConfigCredentialsError: If there is an error in configuring the M365 credentials from a dictionary.
+            M365InvalidProviderIdError: If the provider ID does not match the application tenant domain.

         Examples:
             >>> M365Provider.test_connection(az_cli_auth=True)

@@ -649,11 +680,22 @@ class M365Provider(Provider):
         # Get the dict from the static credentials
         m365_credentials = None
         if tenant_id and client_id and client_secret:
-            m365_credentials = M365Provider.validate_static_credentials(
-                tenant_id=tenant_id,
-                client_id=client_id,
-                client_secret=client_secret,
-            )
+            if not user and not encrypted_password:
+                m365_credentials = M365Provider.validate_static_credentials(
+                    tenant_id=tenant_id,
+                    client_id=client_id,
+                    client_secret=client_secret,
+                    user="user",
+                    encrypted_password="encrypted_password",
+                )
+            else:
+                m365_credentials = M365Provider.validate_static_credentials(
+                    tenant_id=tenant_id,
+                    client_id=client_id,
+                    client_secret=client_secret,
+                    user=user,
+                    encrypted_password=encrypted_password,
+                )

         # Set up the M365 session
         credentials = M365Provider.setup_session(

@@ -668,7 +710,30 @@ class M365Provider(Provider):

             GraphServiceClient(credentials=credentials)

-            logger.info("M365 provider: Connection to M365 successful")
+            logger.info("M365 provider: Connection to MSGraph successful")
+
+            # Set up PowerShell credentials
+            if user and encrypted_password:
+                M365Provider.setup_powershell(
+                    env_auth,
+                    m365_credentials,
+                    provider_id,
+                )
+            else:
+                logger.info(
+                    "M365 provider: Connection to PowerShell has not been requested"
+                )
+
+            logger.info("M365 provider: Connection to PowerShell successful")
+
+            # Check that user domain, provider_id and Graph client tenant_domain are the same
+            if user and encrypted_password:
+                user_domain = user.split("@")[1]
+                if provider_id and user_domain != provider_id:
+                    raise M365InvalidProviderIdError(
+                        file=os.path.basename(__file__),
+                        message=f"Provider ID {provider_id} does not match Application tenant domain {user_domain}",
+                    )

             return Connection(is_connected=True)
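test_connection now verifies two planes: Microsoft Graph through GraphServiceClient, and the PowerShell session whenever a user and encrypted password are supplied, including the user-domain/provider_id match. A hedged call sketch; every value below is a placeholder:

connection = M365Provider.test_connection(
    tenant_id="00000000-0000-0000-0000-000000000000",
    client_id="11111111-1111-1111-1111-111111111111",
    client_secret="<client-secret>",
    user="scanner@contoso.onmicrosoft.com",
    encrypted_password="<encrypted-password>",
    provider_id="contoso.onmicrosoft.com",
    raise_on_exception=False,
)
if connection.is_connected:
    print("MSGraph and PowerShell connectivity verified")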
@@ -896,7 +961,11 @@ class M365Provider(Provider):

     @staticmethod
     def validate_static_credentials(
-        tenant_id: str = None, client_id: str = None, client_secret: str = None
+        tenant_id: str = None,
+        client_id: str = None,
+        client_secret: str = None,
+        user: str = None,
+        encrypted_password: str = None,
     ) -> dict:
         """
         Validates the static credentials for the M365 provider.

@@ -905,6 +974,8 @@ class M365Provider(Provider):
             tenant_id (str): The M365 Active Directory tenant ID.
             client_id (str): The M365 client ID.
             client_secret (str): The M365 client secret.
+            user (str): The M365 user email.
+            encrypted_password (str): The M365 encrypted password.

         Raises:
             M365NotValidTenantIdError: If the provided M365 Tenant ID is not valid.

@@ -934,19 +1005,36 @@ class M365Provider(Provider):
                 file=os.path.basename(__file__),
                 message="The provided M365 Client ID is not valid.",
             )

         # Validate the Client Secret
-        if not re.match("^[a-zA-Z0-9._~-]+$", client_secret):
+        if not client_secret:
             raise M365NotValidClientSecretError(
                 file=os.path.basename(__file__),
                 message="The provided M365 Client Secret is not valid.",
             )

+        # Validate the User
+        if not user:
+            raise M365NotValidUserError(
+                file=os.path.basename(__file__),
+                message="The provided M365 User is not valid.",
+            )
+
+        # Validate the Encrypted Password
+        if not encrypted_password:
+            raise M365NotValidEncryptedPasswordError(
+                file=os.path.basename(__file__),
+                message="The provided M365 Encrypted Password is not valid.",
+            )
+
         try:
             M365Provider.verify_client(tenant_id, client_id, client_secret)
             return {
                 "tenant_id": tenant_id,
                 "client_id": client_id,
                 "client_secret": client_secret,
+                "user": user,
+                "encrypted_password": encrypted_password,
             }
         except M365NotValidTenantIdError as tenant_id_error:
             logger.error(
@@ -25,6 +25,7 @@ class M365Credentials(BaseModel):
     client_id: str = ""
     client_secret: str = ""
     tenant_id: str = ""
+    provider_id: str = ""


 class M365OutputOptions(ProviderOutputOptions):
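The new provider_id field is what test_credentials compares against the user's domain earlier in this diff. A hedged construction sketch; the user and passwd fields are inferred from their use in test_credentials, and all values are placeholders:

creds = M365Credentials(
    user="scanner@contoso.onmicrosoft.com",
    passwd="<encrypted-password>",  # consumed by ConvertTo-SecureString above
    client_id="11111111-1111-1111-1111-111111111111",
    client_secret="<client-secret>",
    tenant_id="00000000-0000-0000-0000-000000000000",
    provider_id="contoso.onmicrosoft.com",
)
# test_credentials() raises M365UserNotBelongingToTenantError when
# provider_id does not end with the domain part of `user`.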
@@ -1,7 +1,6 @@
 from asyncio import gather, get_event_loop
 from typing import List, Optional

-from msgraph.generated.models.o_data_errors.o_data_error import ODataError
 from pydantic import BaseModel

 from prowler.lib.logger import logger

@@ -40,20 +39,6 @@ class AdminCenter(M365Service):
                     license_details = await self.client.users.by_user_id(
                         user.id
                     ).license_details.get()
-                    try:
-                        mailbox_settings = await self.client.users.by_user_id(
-                            user.id
-                        ).mailbox_settings.get()
-                        mailbox_settings.user_purpose
-                    except ODataError as error:
-                        if error.error.code == "MailboxNotEnabledForRESTAPI":
-                            logger.warning(
-                                f"MailboxNotEnabledForRESTAPI for user {user.id}"
-                            )
-                        else:
-                            logger.error(
-                                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                            )
                     users.update(
                         {
                             user.id: User(
@@ -0,0 +1,30 @@
{
  "Provider": "m365",
  "CheckID": "defender_antiphishing_policy_configured",
  "CheckTitle": "Ensure anti-phishing policies are properly configured and active.",
  "CheckType": [],
  "ServiceName": "defender",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "low",
  "ResourceType": "Defender Anti-Phishing Policy",
  "Description": "Ensure that anti-phishing policies are created and configured for specific users, groups, or domains, taking precedence over the default policy. This check verifies the existence of rules within policies and validates specific policy settings such as spoof intelligence, DMARC actions, safety tips, and unauthenticated sender actions.",
  "Risk": "Without anti-phishing policies, organizations may rely solely on default settings, which might not adequately protect against phishing attacks targeted at specific users, groups, or domains. This increases the risk of successful phishing attempts and potential data breaches.",
  "RelatedUrl": "https://learn.microsoft.com/en-us/microsoft-365/security/office-365-security/set-up-anti-phishing-policies?view=o365-worldwide",
  "Remediation": {
    "Code": {
      "CLI": "$params = @{Name='<policy_name>';PhishThresholdLevel=3;EnableTargetedUserProtection=$true;EnableOrganizationDomainsProtection=$true;EnableMailboxIntelligence=$true;EnableMailboxIntelligenceProtection=$true;EnableSpoofIntelligence=$true;TargetedUserProtectionAction='Quarantine';TargetedDomainProtectionAction='Quarantine';MailboxIntelligenceProtectionAction='Quarantine';TargetedUserQuarantineTag='DefaultFullAccessWithNotificationPolicy';MailboxIntelligenceQuarantineTag='DefaultFullAccessWithNotificationPolicy';TargetedDomainQuarantineTag='DefaultFullAccessWithNotificationPolicy';EnableFirstContactSafetyTips=$true;EnableSimilarUsersSafetyTips=$true;EnableSimilarDomainsSafetyTips=$true;EnableUnusualCharactersSafetyTips=$true;HonorDmarcPolicy=$true}; New-AntiPhishPolicy @params; New-AntiPhishRule -Name $params.Name -AntiPhishPolicy $params.Name -RecipientDomainIs (Get-AcceptedDomain).Name -Priority 0",
      "NativeIaC": "",
      "Other": "1. Navigate to Microsoft 365 Defender https://security.microsoft.com. 2. Click to expand Email & collaboration and select Policies & rules. 3. On the Policies & rules page select Threat policies. 4. Under Policies, select Anti-phishing. 5. Ensure policies have rules with the state set to 'on' and validate settings: spoof intelligence enabled, spoof intelligence action set to 'Quarantine', DMARC reject and quarantine actions, safety tips enabled, unauthenticated sender action enabled, show tag enabled, and honor DMARC policy enabled. If not, modify them to be as recommended.",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Create and configure anti-phishing policies for specific users, groups, or domains to enhance protection against phishing attacks.",
      "Url": "https://learn.microsoft.com/en-us/microsoft-365/security/office-365-security/set-up-anti-phishing-policies?view=o365-worldwide"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,59 @@
from typing import List

from prowler.lib.check.models import Check, CheckReportM365
from prowler.providers.m365.services.defender.defender_client import defender_client


class defender_antiphishing_policy_configured(Check):
    """
    Check if an anti-phishing policy is established and properly configured in the Defender service.

    Attributes:
        metadata: Metadata associated with the check (inherited from Check).
    """

    def execute(self) -> List[CheckReportM365]:
        """
        Execute the check to verify if an anti-phishing policy is established and properly configured.

        This method checks the Defender anti-phishing policies to ensure they are configured
        according to best practices.

        Returns:
            List[CheckReportM365]: A list of reports containing the result of the check.
        """
        findings = []
        for policy_name, policy in defender_client.antiphishing_policies.items():
            report = CheckReportM365(
                metadata=self.metadata(),
                resource=policy,
                resource_name="Defender Anti-Phishing Policy",
                resource_id=policy_name,
            )
            report.status = "FAIL"
            report.status_extended = (
                f"Anti-phishing policy {policy_name} is not properly configured."
            )

            if (
                not policy.default
                and policy_name in defender_client.antiphising_rules
                and defender_client.antiphising_rules[policy_name].state.lower()
                == "enabled"
            ) or policy.default:
                if (
                    policy.spoof_intelligence
                    and policy.spoof_intelligence_action.lower() == "quarantine"
                    and policy.dmarc_reject_action.lower() == "quarantine"
                    and policy.dmarc_quarantine_action.lower() == "quarantine"
                    and policy.safety_tips
                    and policy.unauthenticated_sender_action
                    and policy.show_tag
                    and policy.honor_dmarc_policy
                ):
                    report.status = "PASS"
                    report.status_extended = f"Anti-phishing policy {policy_name} is properly configured and enabled."

            findings.append(report)

        return findings
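The PASS branch above requires every hardening flag simultaneously, so a single weak setting fails the policy. A self-contained sketch of that predicate; the attribute shape is assumed from the fields the check reads:

from types import SimpleNamespace

# Hypothetical policy object mirroring the attributes used by the check.
policy = SimpleNamespace(
    default=True,
    spoof_intelligence=True,
    spoof_intelligence_action="Quarantine",
    dmarc_reject_action="Quarantine",
    dmarc_quarantine_action="Quarantine",
    safety_tips=True,
    unauthenticated_sender_action=True,
    show_tag=True,
    honor_dmarc_policy=True,
)

properly_configured = (
    policy.spoof_intelligence
    and policy.spoof_intelligence_action.lower() == "quarantine"
    and policy.dmarc_reject_action.lower() == "quarantine"
    and policy.dmarc_quarantine_action.lower() == "quarantine"
    and policy.safety_tips
    and policy.unauthenticated_sender_action
    and policy.show_tag
    and policy.honor_dmarc_policy
)
print(properly_configured)  # becomes False as soon as any one field is weakened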
@@ -0,0 +1,30 @@
{
  "Provider": "m365",
  "CheckID": "defender_antispam_connection_filter_policy_empty_ip_allowlist",
  "CheckTitle": "Ensure the Anti-Spam Connection Filter Policy IP Allowlist is empty or undefined.",
  "CheckType": [],
  "ServiceName": "defender",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "medium",
  "ResourceType": "Defender Anti-Spam Policy",
  "Description": "This check focuses on Microsoft 365 organizations with Exchange Online mailboxes or standalone Exchange Online Protection (EOP) organizations. It ensures that the connection filter policy's IP Allowlist is empty or undefined to prevent bypassing spam filtering and sender authentication checks, which could lead to successful delivery of malicious emails.",
  "Risk": "Using the IP Allowlist without additional verification like mail flow rules poses a risk, as emails from these sources skip essential security checks (SPF, DKIM, DMARC). This could allow attackers to deliver harmful emails directly to the Inbox.",
  "RelatedUrl": "",
  "Remediation": {
    "Code": {
      "CLI": "Set-HostedConnectionFilterPolicy -Identity Default -IPAllowList @{}",
      "NativeIaC": "",
      "Other": "1. Navigate to Microsoft 365 Defender https://security.microsoft.com. 2. Click to expand Email & collaboration and select Policies & rules. 3. On the Policies & rules page select Threat policies. 4. Under Policies, select Anti-spam and click on the Connection filter policy (Default). 5. Remove IP entries from the allow list. 6. Click Save.",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Ensure that the IP Allowlist in your connection filter policy is empty or undefined to prevent bypassing essential security checks.",
      "Url": "https://learn.microsoft.com/en-us/powershell/module/exchange/set-hostedconnectionfilterpolicy?view=exchange-ps"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,43 @@
from typing import List

from prowler.lib.check.models import Check, CheckReportM365
from prowler.providers.m365.services.defender.defender_client import defender_client


class defender_antispam_connection_filter_policy_empty_ip_allowlist(Check):
    """
    Check if the IP Allowlist is not used in the Antispam Connection Filter Policy.

    Attributes:
        metadata: Metadata associated with the check (inherited from Check).
    """

    def execute(self) -> List[CheckReportM365]:
        """
        Execute the check to verify if the IP Allowlist is not used.

        This method checks the Antispam Connection Filter Policy to determine if the
        IP Allowlist is empty or undefined.

        Returns:
            List[CheckReportM365]: A list of reports containing the result of the check.
        """
        findings = []
        policy = defender_client.connection_filter_policy
        if policy:
            report = CheckReportM365(
                metadata=self.metadata(),
                resource=policy,
                resource_name="Defender Antispam Connection Filter Policy",
                resource_id=policy.identity,
            )
            report.status = "PASS"
            report.status_extended = f"IP Allowlist is not used in the Antispam Connection Filter Policy {policy.identity}."

            if policy.ip_allow_list:
                report.status = "FAIL"
                report.status_extended = f"IP Allowlist is used in the Antispam Connection Filter Policy {policy.identity} with IPs: {policy.ip_allow_list}."

            findings.append(report)

        return findings
@@ -0,0 +1,30 @@
{
  "Provider": "m365",
  "CheckID": "defender_antispam_connection_filter_policy_safe_list_off",
  "CheckTitle": "Ensure the default connection filter policy has the SafeList setting disabled",
  "CheckType": [],
  "ServiceName": "defender",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "medium",
  "ResourceType": "Defender Anti-Spam Policy",
  "Description": "This check ensures that the EnableSafeList setting in the default connection filter policy is set to False. The safe list, managed dynamically by Microsoft, allows emails from listed IPs to bypass spam filtering and sender authentication checks, posing a security risk.",
  "Risk": "If the safe list is enabled, emails from IPs on this list can bypass essential security checks (SPF, DKIM, DMARC), potentially allowing malicious emails to be delivered directly to users' inboxes.",
  "RelatedUrl": "https://learn.microsoft.com/en-us/defender-office-365/connection-filter-policies-configure",
  "Remediation": {
    "Code": {
      "CLI": "Set-HostedConnectionFilterPolicy -Identity Default -EnableSafeList $false",
      "NativeIaC": "",
      "Other": "1. Navigate to Microsoft 365 Defender https://security.microsoft.com. 2. Click to expand Email & collaboration and select Policies & rules. 3. On the Policies & rules page select Threat policies. 4. Under Policies, select Anti-spam and click on the Connection filter policy (Default). 5. Disable the safe list option. 6. Click Save.",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Ensure that the EnableSafeList setting in your connection filter policy is set to False to prevent bypassing essential security checks.",
      "Url": "https://learn.microsoft.com/en-us/defender-office-365/create-safe-sender-lists-in-office-365#use-the-ip-allow-list"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,43 @@
from typing import List

from prowler.lib.check.models import Check, CheckReportM365
from prowler.providers.m365.services.defender.defender_client import defender_client


class defender_antispam_connection_filter_policy_safe_list_off(Check):
    """
    Check if the Safe List is off in the Antispam Connection Filter Policy.

    Attributes:
        metadata: Metadata associated with the check (inherited from Check).
    """

    def execute(self) -> List[CheckReportM365]:
        """
        Execute the check to verify if the Safe List is off.

        This method checks the Antispam Connection Filter Policy to determine if the
        Safe List is disabled.

        Returns:
            List[CheckReportM365]: A list of reports containing the result of the check.
        """
        findings = []
        policy = defender_client.connection_filter_policy
        if policy:
            report = CheckReportM365(
                metadata=self.metadata(),
                resource=policy,
                resource_name="Defender Antispam Connection Filter Policy",
                resource_id=policy.identity,
            )
            report.status = "PASS"
            report.status_extended = f"Safe List is disabled in the Antispam Connection Filter Policy {policy.identity}."

            if policy.enable_safe_list:
                report.status = "FAIL"
                report.status_extended = f"Safe List is not disabled in the Antispam Connection Filter Policy {policy.identity}."

            findings.append(report)

        return findings

@@ -0,0 +1,30 @@
{
  "Provider": "m365",
  "CheckID": "defender_antispam_outbound_policy_configured",
  "CheckTitle": "Ensure Defender Outbound Spam Policies are set to notify administrators.",
  "CheckType": [],
  "ServiceName": "defender",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "low",
  "ResourceType": "Defender Anti-Spam Policy",
  "Description": "Ensure that outbound anti-spam policies are configured to notify administrators and copy suspicious outbound messages to designated recipients when a sender is blocked for sending spam emails.",
  "Risk": "Without outbound spam notifications and message copies, compromised accounts may go undetected, increasing the risk of reputation damage or data leakage through unauthorized email activity.",
  "RelatedUrl": "https://learn.microsoft.com/en-us/defender-office-365/outbound-spam-protection-about",
  "Remediation": {
    "Code": {
      "CLI": "$BccEmailAddress = @(\"<INSERT-EMAIL>\")\n$NotifyEmailAddress = @(\"<INSERT-EMAIL>\")\nSet-HostedOutboundSpamFilterPolicy -Identity Default -BccSuspiciousOutboundAdditionalRecipients $BccEmailAddress -BccSuspiciousOutboundMail $true -NotifyOutboundSpam $true -NotifyOutboundSpamRecipients $NotifyEmailAddress",
      "NativeIaC": "",
      "Other": "1. Navigate to Microsoft 365 Defender https://security.microsoft.com. 2. Click to expand Email & collaboration and select Policies & rules > Threat policies. 3. Under Policies, select Anti-spam. 4. Click on the Anti-spam outbound policy (default). 5. Select Edit protection settings then under Notifications: 6. Check 'Send a copy of suspicious outbound messages or message that exceed these limits to these users and groups' and enter the email addresses. 7. Check 'Notify these users and groups if a sender is blocked due to sending outbound spam' and enter the desired email addresses. 8. Click Save.",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Configure Defender outbound spam filter policies to notify administrators and copy suspicious outbound messages when users are blocked for sending spam.",
      "Url": "https://learn.microsoft.com/en-us/defender-office-365/outbound-spam-protection-about"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": "Ensure settings are applied to the highest priority policy if custom policies exist. Default values do not notify or copy outbound spam messages by default."
}
Some files were not shown because too many files have changed in this diff.