Mirror of https://github.com/prowler-cloud/prowler.git (synced 2026-05-06 08:47:18 +00:00)

Compare commits (32 commits)
| SHA1 |
|---|
| 8ecc97df45 |
| 8db3a89669 |
| c802dc8a36 |
| 3ab9a4efa5 |
| 36b8aa1b79 |
| e821e07d7d |
| 228fe6d579 |
| 578186aa40 |
| 4608e45c8a |
| 5987651aee |
| 85800f2ddd |
| 4fb5272362 |
| 85d38b5f71 |
| 59dcdb87c4 |
| 9297453b8a |
| dd37f4ee1f |
| 20f36f7c84 |
| ec4d27746f |
| 7076900fb1 |
| 5d90352a0f |
| a981dc64a7 |
| d2086cad3f |
| 380b89cfb6 |
| 13b04d339b |
| be3c5fb3c1 |
| 1de01bcb78 |
| 13d983450c |
| 8b368e1343 |
| c76a9baa20 |
| 30e2813e02 |
| 0f874c6ffd |
| 2242689295 |
@@ -145,7 +145,7 @@ SENTRY_RELEASE=local
NEXT_PUBLIC_SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}

#### Prowler release version ####
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.25.0
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.26.0

# Social login credentials
SOCIAL_GOOGLE_OAUTH_CALLBACK_URL="${AUTH_URL}/api/auth/callback/google"
@@ -130,7 +130,7 @@ jobs:
git --no-pager diff

- name: Create PR for next API minor version to master
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -189,7 +189,7 @@ jobs:
git --no-pager diff

- name: Create PR for first API patch version to version branch
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -272,7 +272,7 @@ jobs:
git --no-pager diff

- name: Create PR for next API patch version to version branch
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -75,7 +75,7 @@ jobs:

- name: Create PR for documentation update to master
if: steps.docs_version.outputs.skip == 'false'
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -62,7 +62,7 @@ jobs:
"Alan-TheGentleman"
"alejandrobailo"
"amitsharm"
"andoniaf"
# "andoniaf"
"cesararroba"
"danibarranqueroo"
"HugoPBrito"

@@ -349,7 +349,7 @@ jobs:

- name: Create PR for API dependency update
if: ${{ env.PATCH_VERSION == '0' }}
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
commit-message: 'chore(api): update prowler dependency to ${{ env.BRANCH_NAME }} for release ${{ env.PROWLER_VERSION }}'

@@ -108,7 +108,7 @@ jobs:
git --no-pager diff

- name: Create PR for next minor version to master
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -160,7 +160,7 @@ jobs:
git --no-pager diff

- name: Create PR for first patch version to version branch
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -228,7 +228,7 @@ jobs:
git --no-pager diff

- name: Create PR for next patch version to version branch
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -58,7 +58,7 @@ jobs:

- name: Create pull request
id: create-pr
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
author: 'prowler-bot <179230569+prowler-bot@users.noreply.github.com>'

@@ -55,7 +55,7 @@ jobs:

- name: Create pull request
id: create-pr
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
author: 'prowler-bot <179230569+prowler-bot@users.noreply.github.com>'

@@ -209,11 +209,11 @@ jobs:
echo "AWS service_paths='${STEPS_AWS_SERVICES_OUTPUTS_SERVICE_PATHS}'"

if [ "${STEPS_AWS_SERVICES_OUTPUTS_RUN_ALL}" = "true" ]; then
poetry run pytest -p no:randomly -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
elif [ -z "${STEPS_AWS_SERVICES_OUTPUTS_SERVICE_PATHS}" ]; then
echo "No AWS service paths detected; skipping AWS tests."
else
poetry run pytest -p no:randomly -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml ${STEPS_AWS_SERVICES_OUTPUTS_SERVICE_PATHS}
poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml ${STEPS_AWS_SERVICES_OUTPUTS_SERVICE_PATHS}
fi
env:
STEPS_AWS_SERVICES_OUTPUTS_RUN_ALL: ${{ steps.aws-services.outputs.run_all }}

@@ -107,7 +107,7 @@ jobs:
git --no-pager diff

- name: Create PR for next minor version to master
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -161,7 +161,7 @@ jobs:
git --no-pager diff

- name: Create PR for first patch version to version branch
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -231,7 +231,7 @@ jobs:
git --no-pager diff

- name: Create PR for next patch version to version branch
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
uses: peter-evans/create-pull-request@5f6978faf089d4d20b00c7766989d076bb2fc7f1 # v8.1.1
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
@@ -1,17 +1,34 @@
# Priority tiers (lower = runs first, same priority = concurrent):
# P0 — fast file fixers
# P10 — validators and guards
# P20 — auto-formatters
# P30 — linters
# P40 — security scanners
# P50 — dependency validation

default_install_hook_types: [pre-commit, pre-push]

repos:
## GENERAL (prek built-in — no external repo needed)
- repo: builtin
hooks:
- id: check-merge-conflict
priority: 10
- id: check-yaml
args: ["--allow-multiple-documents"]
exclude: (prowler/config/llm_config.yaml|contrib/)
priority: 10
- id: check-json
priority: 10
- id: end-of-file-fixer
priority: 0
- id: trailing-whitespace
priority: 0
- id: no-commit-to-branch
priority: 10
- id: pretty-format-json
args: ["--autofix", --no-sort-keys, --no-ensure-ascii]
priority: 10

## TOML
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks

@@ -20,6 +37,7 @@ repos:
- id: pretty-format-toml
args: [--autofix]
files: pyproject.toml
priority: 20

## GITHUB ACTIONS
- repo: https://github.com/zizmorcore/zizmor-pre-commit

@@ -27,6 +45,7 @@ repos:
hooks:
- id: zizmor
files: ^\.github/
priority: 30

## BASH
- repo: https://github.com/koalaman/shellcheck-precommit

@@ -34,6 +53,7 @@ repos:
hooks:
- id: shellcheck
exclude: contrib
priority: 30

## PYTHON — SDK (prowler/, tests/, dashboard/, util/, scripts/)
- repo: https://github.com/myint/autoflake

@@ -48,6 +68,7 @@ repos:
"--remove-all-unused-imports",
"--remove-unused-variable",
]
priority: 20

- repo: https://github.com/pycqa/isort
rev: 8.0.1

@@ -56,6 +77,7 @@ repos:
name: "SDK - isort"
files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
args: ["--profile", "black"]
priority: 20

- repo: https://github.com/psf/black
rev: 26.3.1

@@ -63,6 +85,7 @@ repos:
- id: black
name: "SDK - black"
files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
priority: 20

- repo: https://github.com/pycqa/flake8
rev: 7.3.0

@@ -71,6 +94,7 @@ repos:
name: "SDK - flake8"
files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
args: ["--ignore=E266,W503,E203,E501,W605"]
priority: 30

## PYTHON — API + MCP Server (ruff)
- repo: https://github.com/astral-sh/ruff-pre-commit

@@ -80,9 +104,11 @@ repos:
name: "API + MCP - ruff check"
files: { glob: ["{api,mcp_server}/**/*.py"] }
args: ["--fix"]
priority: 30
- id: ruff-format
name: "API + MCP - ruff format"
files: { glob: ["{api,mcp_server}/**/*.py"] }
priority: 20

## PYTHON — Poetry
- repo: https://github.com/python-poetry/poetry

@@ -93,24 +119,28 @@ repos:
args: ["--directory=./api"]
files: { glob: ["api/{pyproject.toml,poetry.lock}"] }
pass_filenames: false
priority: 50

- id: poetry-lock
name: API - poetry-lock
args: ["--directory=./api"]
files: { glob: ["api/{pyproject.toml,poetry.lock}"] }
pass_filenames: false
priority: 50

- id: poetry-check
name: SDK - poetry-check
args: ["--directory=./"]
files: { glob: ["{pyproject.toml,poetry.lock}"] }
pass_filenames: false
priority: 50

- id: poetry-lock
name: SDK - poetry-lock
args: ["--directory=./"]
files: { glob: ["{pyproject.toml,poetry.lock}"] }
pass_filenames: false
priority: 50

## CONTAINERS
- repo: https://github.com/hadolint/hadolint

@@ -118,6 +148,7 @@ repos:
hooks:
- id: hadolint
args: ["--ignore=DL3013"]
priority: 30

## LOCAL HOOKS
- repo: local

@@ -128,6 +159,7 @@ repos:
language: system
types: [python]
files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
priority: 30

- id: trufflehog
name: TruffleHog

@@ -138,6 +170,7 @@ repos:
language: system
pass_filenames: false
stages: ["pre-commit", "pre-push"]
priority: 40

- id: bandit
name: bandit

@@ -148,6 +181,7 @@ repos:
files: '.*\.py'
exclude:
{ glob: ["{contrib,skills}/**", "**/.venv/**", "**/*_test.py"] }
priority: 40

- id: safety
name: safety

@@ -166,6 +200,7 @@ repos:
".safety-policy.yml",
],
}
priority: 40

- id: vulture
name: vulture

@@ -174,3 +209,4 @@ repos:
language: system
types: [python]
files: '.*\.py'
priority: 40
@@ -104,22 +104,22 @@ Every AWS provider scan will enqueue an Attack Paths ingestion job automatically

| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) | Support | Interface |
|---|---|---|---|---|---|---|
| AWS | 572 | 83 | 41 | 17 | Official | UI, API, CLI |
| Azure | 165 | 20 | 18 | 13 | Official | UI, API, CLI |
| GCP | 100 | 13 | 15 | 11 | Official | UI, API, CLI |
| Kubernetes | 83 | 7 | 7 | 9 | Official | UI, API, CLI |
| GitHub | 21 | 2 | 1 | 2 | Official | UI, API, CLI |
| M365 | 89 | 9 | 4 | 5 | Official | UI, API, CLI |
| OCI | 48 | 13 | 3 | 10 | Official | UI, API, CLI |
| Alibaba Cloud | 61 | 9 | 3 | 9 | Official | UI, API, CLI |
| Cloudflare | 29 | 2 | 0 | 5 | Official | UI, API, CLI |
| AWS | 595 | 84 | 43 | 17 | Official | UI, API, CLI |
| Azure | 167 | 22 | 19 | 16 | Official | UI, API, CLI |
| GCP | 102 | 18 | 17 | 12 | Official | UI, API, CLI |
| Kubernetes | 83 | 7 | 7 | 11 | Official | UI, API, CLI |
| GitHub | 24 | 3 | 1 | 5 | Official | UI, API, CLI |
| M365 | 101 | 10 | 4 | 10 | Official | UI, API, CLI |
| OCI | 51 | 14 | 4 | 10 | Official | UI, API, CLI |
| Alibaba Cloud | 61 | 9 | 4 | 9 | Official | UI, API, CLI |
| Cloudflare | 29 | 3 | 0 | 5 | Official | UI, API, CLI |
| IaC | [See `trivy` docs.](https://trivy.dev/latest/docs/coverage/iac/) | N/A | N/A | N/A | Official | UI, API, CLI |
| MongoDB Atlas | 10 | 3 | 0 | 8 | Official | UI, API, CLI |
| LLM | [See `promptfoo` docs.](https://www.promptfoo.dev/docs/red-team/plugins/) | N/A | N/A | N/A | Official | CLI |
| Image | N/A | N/A | N/A | N/A | Official | CLI, API |
| Google Workspace | 1 | 1 | 0 | 1 | Official | CLI |
| OpenStack | 27 | 4 | 0 | 8 | Official | UI, API, CLI |
| Vercel | 30 | 6 | 0 | 5 | Official | CLI |
| Google Workspace | 25 | 4 | 2 | 4 | Official | CLI |
| OpenStack | 34 | 5 | 0 | 9 | Official | UI, API, CLI |
| Vercel | 26 | 6 | 0 | 5 | Official | CLI |
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |

> [!Note]
+18 -1

@@ -2,6 +2,23 @@

All notable changes to the **Prowler API** are documented in this file.

## [1.27.0] (Prowler UNRELEASED)

### 🚀 Added

- New `scan-reset-ephemeral-resources` post-scan task zeroes `failed_findings_count` for resources missing from the latest full-scope scan, keeping ephemeral resources from polluting the Resources page sort [(#10929)](https://github.com/prowler-cloud/prowler/pull/10929)

---

## [1.26.1] (Prowler v5.25.1)

### 🐞 Fixed

- Attack Paths: AWS scans no longer fail when enabled regions cannot be retrieved, and scans stuck in `scheduled` state are now cleaned up after the stale threshold [(#10917)](https://github.com/prowler-cloud/prowler/pull/10917)
- Scan report and compliance downloads now redirect to a presigned S3 URL instead of streaming through the API worker, preventing gunicorn timeouts on large files [(#10927)](https://github.com/prowler-cloud/prowler/pull/10927)

---

## [1.26.0] (Prowler v5.25.0)

### 🚀 Added

@@ -12,7 +29,7 @@ All notable changes to the **Prowler API** are documented in this file.

### 🔄 Changed

- Allows tenant owners to expel users from their organizations [(#10787)](https://github.com/prowler-cloud/prowler/pull/10787)
- Allows tenant owners to expel users from their organizations [(#10787)](https://github.com/prowler-cloud/prowler/pull/10787)
- `aggregate_findings`, `aggregate_attack_surface`, `aggregate_scan_resource_group_summaries` and `aggregate_scan_category_summaries` now upsert via `bulk_create(update_conflicts=True, ...)` instead of the prior `ignore_conflicts=True` / plain INSERT / `already backfilled` short-circuit. Re-runs triggered by the post-mute reaggregation pipeline no longer trip the `unique_*_per_scan` constraints nor silently drop updates, and are race-safe under concurrent writers (e.g. scan completion overlapping with a fresh mute rule) [(#10843)](https://github.com/prowler-cloud/prowler/pull/10843)
- Rename the scan-category and scan-resource-group summary aggregators from `backfill_*` to `aggregate_*` [(#10843)](https://github.com/prowler-cloud/prowler/pull/10843)
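The presigned-URL entry above can be illustrated with a minimal, hypothetical sketch (not the project's actual view code; the function name and the way bucket/key are obtained are placeholders): the view asks boto3 for a short-lived `get_object` URL and answers with a 302, so the report bytes never pass through the API worker.

```python
# Illustrative sketch of the redirect pattern described in the 1.26.1 entry above.
# The view name and bucket/key handling are assumptions; the real view derives
# them from the scan's output_location.
import boto3
from django.http import HttpResponseRedirect


def redirect_to_report(request, bucket: str, key: str) -> HttpResponseRedirect:
    s3 = boto3.client("s3")
    url = s3.generate_presigned_url(
        "get_object",
        Params={
            "Bucket": bucket,
            "Key": key,
            # Ask S3 to serve the object as a named download.
            "ResponseContentDisposition": f'attachment; filename="{key.rsplit("/", 1)[-1]}"',
        },
        ExpiresIn=300,  # short-lived; the client only needs it to follow the redirect
    )
    # 302: the browser fetches the bytes directly from S3, not through gunicorn.
    return HttpResponseRedirect(url)
```

The actual change in this commit range (see the views.py hunks further down) follows the same shape, plus a HEAD check before presigning so missing objects still return 404.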
+1 -1

@@ -50,7 +50,7 @@ name = "prowler-api"
package-mode = false
# Needed for the SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.26.0"
version = "1.27.0"

[project.scripts]
celery = "src.backend.config.settings.celery"
@@ -52,7 +52,7 @@ class ApiConfig(AppConfig):
"check_and_fix_socialaccount_sites_migration",
]

# Skip Neo4j initialization during tests, some Django commands, and Celery
# Skip eager Neo4j init for tests, some Django commands, and Celery (prefork pool: driver must stay lazy, no post_fork hook)
if getattr(settings, "TESTING", False) or (
len(sys.argv) > 1
and (

@@ -64,7 +64,7 @@ class ApiConfig(AppConfig):
)
):
logger.info(
"Skipping Neo4j initialization because tests, some Django commands or Celery"
"Skipping eager Neo4j init: tests, some Django commands, or Celery prefork pool (driver stays lazy)"
)

else:
@@ -595,10 +595,40 @@ class Scan(RowLevelSecurityProtectedModel):
objects = ActiveProviderManager()
all_objects = models.Manager()

_SCOPING_SCANNER_ARG_KEYS_CACHE: tuple[str, ...] | None = None

@classmethod
def get_scoping_scanner_arg_keys(cls) -> tuple[str, ...]:
"""Return the scanner_args keys that mark a scan as scoped.

Derived from ``prowler.lib.scan.scan.Scan.__init__`` so the API stays
in sync with whatever the SDK actually accepts as filters. Cached at
class level — the signature is stable for the process lifetime.
"""
if cls._SCOPING_SCANNER_ARG_KEYS_CACHE is None:
import inspect

from prowler.lib.scan.scan import Scan as ProwlerScan

params = inspect.signature(ProwlerScan.__init__).parameters
cls._SCOPING_SCANNER_ARG_KEYS_CACHE = tuple(
name for name in params if name not in ("self", "provider")
)
return cls._SCOPING_SCANNER_ARG_KEYS_CACHE

class TriggerChoices(models.TextChoices):
SCHEDULED = "scheduled", _("Scheduled")
MANUAL = "manual", _("Manual")

# Trigger values for scans that ran the SDK end-to-end. Imported scans (or
# any future trigger) are intentionally NOT in this set — they may carry
# only a partial slice of resources, so post-scan logic that depends on a
# full-scope sweep (e.g. resetting ephemeral resource findings) must skip
# them by default.
LIVE_SCAN_TRIGGERS = frozenset(
(TriggerChoices.SCHEDULED.value, TriggerChoices.MANUAL.value)
)

id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
name = models.CharField(
blank=True, null=True, max_length=100, validators=[MinLengthValidator(3)]

@@ -681,6 +711,24 @@ class Scan(RowLevelSecurityProtectedModel):
class JSONAPIMeta:
resource_name = "scans"

def is_full_scope(self) -> bool:
"""Return True if this scan ran with no scoping filters at all.

Used to gate post-scan operations (such as resetting the
failed_findings_count of resources missing from the scan) that are only
safe when the scan covered every check, service, and category. Imported
scans are NOT full-scope by definition — they may carry only a partial
slice of resources, so they're rejected via ``trigger`` even before the
scanner_args check.
"""
if self.trigger not in self.LIVE_SCAN_TRIGGERS:
return False
scanner_args = self.scanner_args or {}
for key in self.get_scoping_scanner_arg_keys():
if scanner_args.get(key):
return False
return True


class AttackPathsScan(RowLevelSecurityProtectedModel):
objects = ActiveProviderManager()
+1620 -58
File diff suppressed because it is too large.
@@ -3841,9 +3841,14 @@ class TestScanViewSet:
"prowler-output-123_threatscore_report.pdf",
)

presigned_url = (
"https://test-bucket.s3.amazonaws.com/"
"tenant-id/scan-id/threatscore/prowler-output-123_threatscore_report.pdf"
"?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Expires=300"
)
mock_s3_client = Mock()
mock_s3_client.list_objects_v2.return_value = {"Contents": [{"Key": pdf_key}]}
mock_s3_client.get_object.return_value = {"Body": io.BytesIO(b"pdf-bytes")}
mock_s3_client.generate_presigned_url.return_value = presigned_url

mock_env_str.return_value = bucket
mock_get_s3_client.return_value = mock_s3_client

@@ -3852,19 +3857,26 @@ class TestScanViewSet:
url = reverse("scan-threatscore", kwargs={"pk": scan.id})
response = authenticated_client.get(url)

assert response.status_code == status.HTTP_200_OK
assert response["Content-Type"] == "application/pdf"
assert response["Content-Disposition"].endswith(
'"prowler-output-123_threatscore_report.pdf"'
)
assert response.content == b"pdf-bytes"
assert response.status_code == status.HTTP_302_FOUND
assert response["Location"] == presigned_url
mock_s3_client.list_objects_v2.assert_called_once()
mock_s3_client.get_object.assert_called_once_with(Bucket=bucket, Key=pdf_key)
mock_s3_client.generate_presigned_url.assert_called_once_with(
"get_object",
Params={
"Bucket": bucket,
"Key": pdf_key,
"ResponseContentDisposition": (
'attachment; filename="prowler-output-123_threatscore_report.pdf"'
),
"ResponseContentType": "application/pdf",
},
ExpiresIn=300,
)

def test_report_s3_success(self, authenticated_client, scans_fixture, monkeypatch):
"""
When output_location is an S3 URL and the S3 client returns the file successfully,
the view should return the ZIP file with HTTP 200 and proper headers.
When output_location is an S3 URL and the object exists,
the view should return a 302 redirect to a presigned S3 URL.
"""
scan = scans_fixture[0]
bucket = "test-bucket"

@@ -3878,22 +3890,33 @@ class TestScanViewSet:
type("env", (), {"str": lambda self, *args, **kwargs: "test-bucket"})(),
)

presigned_url = (
"https://test-bucket.s3.amazonaws.com/report.zip"
"?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Expires=300"
)

class FakeS3Client:
def get_object(self, Bucket, Key):
def head_object(self, Bucket, Key):
assert Bucket == bucket
assert Key == key
return {"Body": io.BytesIO(b"s3 zip content")}
return {}

def generate_presigned_url(self, ClientMethod, Params, ExpiresIn):
assert ClientMethod == "get_object"
assert Params["Bucket"] == bucket
assert Params["Key"] == key
assert Params["ResponseContentDisposition"] == (
'attachment; filename="report.zip"'
)
assert ExpiresIn == 300
return presigned_url

monkeypatch.setattr("api.v1.views.get_s3_client", lambda: FakeS3Client())

url = reverse("scan-report", kwargs={"pk": scan.id})
response = authenticated_client.get(url)
assert response.status_code == 200
expected_filename = os.path.basename("report.zip")
content_disposition = response.get("Content-Disposition")
assert content_disposition.startswith('attachment; filename="')
assert f'filename="{expected_filename}"' in content_disposition
assert response.content == b"s3 zip content"
assert response.status_code == status.HTTP_302_FOUND
assert response["Location"] == presigned_url

def test_report_s3_success_no_local_files(
self, authenticated_client, scans_fixture, monkeypatch

@@ -4032,23 +4055,31 @@ class TestScanViewSet:
)

match_key = "path/compliance/mitre_attack_aws.csv"
presigned_url = (
"https://test-bucket.s3.amazonaws.com/path/compliance/mitre_attack_aws.csv"
"?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Expires=300"
)

class FakeS3Client:
def list_objects_v2(self, Bucket, Prefix):
return {"Contents": [{"Key": match_key}]}

def get_object(self, Bucket, Key):
return {"Body": io.BytesIO(b"ignored")}
def generate_presigned_url(self, ClientMethod, Params, ExpiresIn):
assert ClientMethod == "get_object"
assert Params["Key"] == match_key
assert Params["ResponseContentDisposition"] == (
'attachment; filename="mitre_attack_aws.csv"'
)
assert ExpiresIn == 300
return presigned_url

monkeypatch.setattr("api.v1.views.get_s3_client", lambda: FakeS3Client())

framework = match_key.split("/")[-1].split(".")[0]
url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": framework})
resp = authenticated_client.get(url)
assert resp.status_code == status.HTTP_200_OK
cd = resp["Content-Disposition"]
assert cd.startswith('attachment; filename="')
assert cd.endswith('filename="mitre_attack_aws.csv"')
assert resp.status_code == status.HTTP_302_FOUND
assert resp["Location"] == presigned_url

def test_compliance_s3_not_found(
self, authenticated_client, scans_fixture, monkeypatch

@@ -4251,8 +4282,8 @@ class TestScanViewSet:
scan.save()

fake_client = MagicMock()
fake_client.get_object.side_effect = ClientError(
{"Error": {"Code": "NoSuchKey"}}, "GetObject"
fake_client.head_object.side_effect = ClientError(
{"Error": {"Code": "NoSuchKey"}}, "HeadObject"
)
mock_get_s3_client.return_value = fake_client

@@ -4275,8 +4306,8 @@ class TestScanViewSet:
scan.save()

fake_client = MagicMock()
fake_client.get_object.side_effect = ClientError(
{"Error": {"Code": "AccessDenied"}}, "GetObject"
fake_client.head_object.side_effect = ClientError(
{"Error": {"Code": "AccessDenied"}}, "HeadObject"
)
mock_get_s3_client.return_value = fake_client
+112 -38

@@ -53,7 +53,7 @@ from django.db.models import (
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast, Coalesce, RowNumber
from django.http import HttpResponse, QueryDict
from django.http import HttpResponse, HttpResponseBase, HttpResponseRedirect, QueryDict
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.dateparse import parse_date

@@ -422,7 +422,7 @@ class SchemaView(SpectacularAPIView):

def get(self, request, *args, **kwargs):
spectacular_settings.TITLE = "Prowler API"
spectacular_settings.VERSION = "1.26.0"
spectacular_settings.VERSION = "1.27.0"
spectacular_settings.DESCRIPTION = (
"Prowler API specification.\n\nThis file is auto-generated."
)

@@ -2080,24 +2080,38 @@ class ScanViewSet(BaseRLSViewSet):
},
)

def _load_file(self, path_pattern, s3=False, bucket=None, list_objects=False):
def _load_file(
self,
path_pattern,
s3=False,
bucket=None,
list_objects=False,
content_type=None,
):
"""
Loads a binary file (e.g., ZIP or CSV) and returns its content and filename.
Resolve a report file location and return the bytes (filesystem) or a redirect (S3).

Depending on the input parameters, this method supports loading:
- From S3 using a direct key.
- From S3 by listing objects under a prefix and matching suffix.
- From the local filesystem using glob pattern matching.
- From S3 using a direct key, returns a 302 to a short-lived presigned URL.
- From S3 by listing objects under a prefix and matching suffix, returns a 302 to a short-lived presigned URL.
- From the local filesystem using glob pattern matching, returns the file bytes.

The S3 branch never streams bytes through the worker; this prevents gunicorn
worker timeouts on large reports.

Args:
path_pattern (str): The key or glob pattern representing the file location.
s3 (bool, optional): Whether the file is stored in S3. Defaults to False.
bucket (str, optional): The name of the S3 bucket, required if `s3=True`. Defaults to None.
list_objects (bool, optional): If True and `s3=True`, list objects by prefix to find the file. Defaults to False.
content_type (str, optional): On the S3 branch, forwarded as `ResponseContentType`
so the presigned download advertises the same Content-Type the API used to send.
Ignored on the filesystem branch.

Returns:
tuple[bytes, str]: A tuple containing the file content as bytes and the filename if successful.
Response: A DRF `Response` object with an appropriate status and error detail if an error occurs.
tuple[bytes, str]: For the filesystem branch, the file content and filename.
HttpResponseRedirect: For the S3 branch on success, a 302 redirect to a presigned `GetObject` URL.
Response: For any error path, a DRF `Response` with an appropriate status and detail.
"""
if s3:
try:

@@ -2144,25 +2158,45 @@ class ScanViewSet(BaseRLSViewSet):
# path_pattern here is prefix, but in compliance we build correct suffix check before
key = keys[0]
else:
# path_pattern is exact key
# path_pattern is exact key; HEAD before presigning to preserve the 404 contract.
key = path_pattern
try:
s3_obj = client.get_object(Bucket=bucket, Key=key)
except ClientError as e:
code = e.response.get("Error", {}).get("Code")
if code == "NoSuchKey":
try:
client.head_object(Bucket=bucket, Key=key)
except ClientError as e:
code = e.response.get("Error", {}).get("Code")
if code in ("NoSuchKey", "404"):
return Response(
{
"detail": "The scan has no reports, or the report generation task has not started yet."
},
status=status.HTTP_404_NOT_FOUND,
)
return Response(
{
"detail": "The scan has no reports, or the report generation task has not started yet."
},
status=status.HTTP_404_NOT_FOUND,
{"detail": "There is a problem with credentials."},
status=status.HTTP_403_FORBIDDEN,
)
return Response(
{"detail": "There is a problem with credentials."},
status=status.HTTP_403_FORBIDDEN,
)
content = s3_obj["Body"].read()

filename = os.path.basename(key)
# escape quotes and strip CR/LF so a malformed key cannot break out of the header
safe_filename = (
filename.replace("\\", "\\\\")
.replace('"', '\\"')
.replace("\r", "")
.replace("\n", "")
)
params = {
"Bucket": bucket,
"Key": key,
"ResponseContentDisposition": f'attachment; filename="{safe_filename}"',
}
if content_type:
params["ResponseContentType"] = content_type
url = client.generate_presigned_url(
"get_object",
Params=params,
ExpiresIn=300,
)
return HttpResponseRedirect(url)
else:
files = glob.glob(path_pattern)
if not files:

@@ -2205,12 +2239,16 @@ class ScanViewSet(BaseRLSViewSet):
bucket = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET", "")
key_prefix = scan.output_location.removeprefix(f"s3://{bucket}/")
loader = self._load_file(
key_prefix, s3=True, bucket=bucket, list_objects=False
key_prefix,
s3=True,
bucket=bucket,
list_objects=False,
content_type="application/x-zip-compressed",
)
else:
loader = self._load_file(scan.output_location, s3=False)

if isinstance(loader, Response):
if isinstance(loader, HttpResponseBase):
return loader

content, filename = loader

@@ -2248,13 +2286,19 @@ class ScanViewSet(BaseRLSViewSet):
prefix = os.path.join(
os.path.dirname(key_prefix), "compliance", f"{name}.csv"
)
loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
loader = self._load_file(
prefix,
s3=True,
bucket=bucket,
list_objects=True,
content_type="text/csv",
)
else:
base = os.path.dirname(scan.output_location)
pattern = os.path.join(base, "compliance", f"*_{name}.csv")
loader = self._load_file(pattern, s3=False)

if isinstance(loader, Response):
if isinstance(loader, HttpResponseBase):
return loader

content, filename = loader

@@ -2287,13 +2331,19 @@ class ScanViewSet(BaseRLSViewSet):
"cis",
"*_cis_report.pdf",
)
loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
loader = self._load_file(
prefix,
s3=True,
bucket=bucket,
list_objects=True,
content_type="application/pdf",
)
else:
base = os.path.dirname(scan.output_location)
pattern = os.path.join(base, "cis", "*_cis_report.pdf")
loader = self._load_file(pattern, s3=False)

if isinstance(loader, Response):
if isinstance(loader, HttpResponseBase):
return loader

content, filename = loader

@@ -2327,13 +2377,19 @@ class ScanViewSet(BaseRLSViewSet):
"threatscore",
"*_threatscore_report.pdf",
)
loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
loader = self._load_file(
prefix,
s3=True,
bucket=bucket,
list_objects=True,
content_type="application/pdf",
)
else:
base = os.path.dirname(scan.output_location)
pattern = os.path.join(base, "threatscore", "*_threatscore_report.pdf")
loader = self._load_file(pattern, s3=False)

if isinstance(loader, Response):
if isinstance(loader, HttpResponseBase):
return loader

content, filename = loader

@@ -2367,13 +2423,19 @@ class ScanViewSet(BaseRLSViewSet):
"ens",
"*_ens_report.pdf",
)
loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
loader = self._load_file(
prefix,
s3=True,
bucket=bucket,
list_objects=True,
content_type="application/pdf",
)
else:
base = os.path.dirname(scan.output_location)
pattern = os.path.join(base, "ens", "*_ens_report.pdf")
loader = self._load_file(pattern, s3=False)

if isinstance(loader, Response):
if isinstance(loader, HttpResponseBase):
return loader

content, filename = loader

@@ -2406,13 +2468,19 @@ class ScanViewSet(BaseRLSViewSet):
"nis2",
"*_nis2_report.pdf",
)
loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
loader = self._load_file(
prefix,
s3=True,
bucket=bucket,
list_objects=True,
content_type="application/pdf",
)
else:
base = os.path.dirname(scan.output_location)
pattern = os.path.join(base, "nis2", "*_nis2_report.pdf")
loader = self._load_file(pattern, s3=False)

if isinstance(loader, Response):
if isinstance(loader, HttpResponseBase):
return loader

content, filename = loader

@@ -2445,13 +2513,19 @@ class ScanViewSet(BaseRLSViewSet):
"csa",
"*_csa_report.pdf",
)
loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
loader = self._load_file(
prefix,
s3=True,
bucket=bucket,
list_objects=True,
content_type="application/pdf",
)
else:
base = os.path.dirname(scan.output_location)
pattern = os.path.join(base, "csa", "*_csa_report.pdf")
loader = self._load_file(pattern, s3=False)

if isinstance(loader, Response):
if isinstance(loader, HttpResponseBase):
return loader

content, filename = loader
@@ -49,7 +49,7 @@ def start_aws_ingestion(
}

boto3_session = get_boto3_session(prowler_api_provider, prowler_sdk_provider)
regions: list[str] = list(prowler_sdk_provider._enabled_regions)
regions: list[str] = resolve_aws_regions(prowler_api_provider, prowler_sdk_provider)
requested_syncs = list(cartography_aws.RESOURCE_FUNCTIONS.keys())

sync_args = cartography_aws._build_aws_sync_kwargs(

@@ -226,6 +226,48 @@ def get_boto3_session(
return boto3_session


def resolve_aws_regions(
prowler_api_provider: ProwlerAPIProvider,
prowler_sdk_provider: ProwlerSDKProvider,
) -> list[str]:
"""Resolve the regions to scan, falling back when `_enabled_regions` is `None`.

The SDK silently sets `_enabled_regions` to `None` when `ec2:DescribeRegions`
fails (missing IAM permission, transient error). Without a fallback the
Cartography ingestion crashes with a non-actionable `TypeError`. Try the
user's `audited_regions` next, then the partition's static region list.
Excluded regions are honored on every branch.
"""
if prowler_sdk_provider._enabled_regions is not None:
regions = set(prowler_sdk_provider._enabled_regions)

elif prowler_sdk_provider.identity.audited_regions:
regions = set(prowler_sdk_provider.identity.audited_regions)

else:
partition = prowler_sdk_provider.identity.partition
try:
regions = prowler_sdk_provider.get_available_aws_service_regions(
"ec2", partition
)

except KeyError:
raise RuntimeError(
f"No region data available for partition {partition!r}; "
f"cannot determine regions to scan for "
f"{prowler_api_provider.uid}"
)

logger.warning(
f"Could not enumerate enabled regions for AWS account "
f"{prowler_api_provider.uid}; falling back to all regions in "
f"partition {partition!r}"
)

excluded = set(getattr(prowler_sdk_provider, "_excluded_regions", None) or ())
return sorted(regions - excluded)


def get_aioboto3_session(boto3_session: boto3.Session) -> aioboto3.Session:
return aioboto3.Session(botocore_session=boto3_session._session)
@@ -18,28 +18,45 @@ logger = get_task_logger(__name__)

def cleanup_stale_attack_paths_scans() -> dict:
"""
Find `EXECUTING` `AttackPathsScan` scans whose workers are dead or that have
exceeded the stale threshold, and mark them as `FAILED`.
Mark stale `AttackPathsScan` rows as `FAILED`.

Two-pass detection:
Covers two stuck-state scenarios:
1. `EXECUTING` scans whose workers are dead, or that have exceeded the
stale threshold while alive.
2. `SCHEDULED` scans that never made it to a worker — parent scan
crashed before dispatch, broker lost the message, etc. Detected by
age plus the parent `Scan` no longer being in flight.
"""
threshold = timedelta(minutes=ATTACK_PATHS_SCAN_STALE_THRESHOLD_MINUTES)
now = datetime.now(tz=timezone.utc)
cutoff = now - threshold

cleaned_up: list[str] = []
cleaned_up.extend(_cleanup_stale_executing_scans(cutoff))
cleaned_up.extend(_cleanup_stale_scheduled_scans(cutoff))

logger.info(
f"Stale `AttackPathsScan` cleanup: {len(cleaned_up)} scan(s) cleaned up"
)
return {"cleaned_up_count": len(cleaned_up), "scan_ids": cleaned_up}


def _cleanup_stale_executing_scans(cutoff: datetime) -> list[str]:
"""
Two-pass detection for `EXECUTING` scans:
1. If `TaskResult.worker` exists, ping the worker.
- Dead worker: cleanup immediately (any age).
- Alive + past threshold: revoke the task, then cleanup.
- Alive + within threshold: skip.
2. If no worker field: fall back to time-based heuristic only.
"""
threshold = timedelta(minutes=ATTACK_PATHS_SCAN_STALE_THRESHOLD_MINUTES)
now = datetime.now(tz=timezone.utc)
cutoff = now - threshold

executing_scans = (
executing_scans = list(
AttackPathsScan.all_objects.using(MainRouter.admin_db)
.filter(state=StateChoices.EXECUTING)
.select_related("task__task_runner_task")
)

# Cache worker liveness so each worker is pinged at most once
executing_scans = list(executing_scans)
workers = {
tr.worker
for scan in executing_scans

@@ -48,7 +65,7 @@ def cleanup_stale_attack_paths_scans() -> dict:
}
worker_alive = {w: _is_worker_alive(w) for w in workers}

cleaned_up = []
cleaned_up: list[str] = []

for scan in executing_scans:
task_result = (

@@ -65,9 +82,7 @@ def cleanup_stale_attack_paths_scans() -> dict:

# Alive but stale — revoke before cleanup
_revoke_task(task_result)
reason = (
"Scan exceeded stale threshold — " "cleaned up by periodic task"
)
reason = "Scan exceeded stale threshold — cleaned up by periodic task"
else:
reason = "Worker dead — cleaned up by periodic task"
else:

@@ -82,10 +97,57 @@ def cleanup_stale_attack_paths_scans() -> dict:
if _cleanup_scan(scan, task_result, reason):
cleaned_up.append(str(scan.id))

logger.info(
f"Stale `AttackPathsScan` cleanup: {len(cleaned_up)} scan(s) cleaned up"
return cleaned_up


def _cleanup_stale_scheduled_scans(cutoff: datetime) -> list[str]:
"""
Cleanup `SCHEDULED` scans that never reached a worker.

Detection:
- `state == SCHEDULED`
- `started_at < cutoff`
- parent `Scan` is no longer in flight (terminal state or missing). This
avoids cleaning up rows whose parent Prowler scan is legitimately still
running.

For each match: revoke the queued task (best-effort; harmless if already
consumed), atomically flip to `FAILED`, and mark the `TaskResult`. The
temp Neo4j database is never created while `SCHEDULED`, so no drop is
needed.
"""
scheduled_scans = list(
AttackPathsScan.all_objects.using(MainRouter.admin_db)
.filter(
state=StateChoices.SCHEDULED,
started_at__lt=cutoff,
)
.select_related("task__task_runner_task", "scan")
)
return {"cleaned_up_count": len(cleaned_up), "scan_ids": cleaned_up}

cleaned_up: list[str] = []
parent_terminal = (
StateChoices.COMPLETED,
StateChoices.FAILED,
StateChoices.CANCELLED,
)

for scan in scheduled_scans:
parent_scan = scan.scan
if parent_scan is not None and parent_scan.state not in parent_terminal:
continue

task_result = (
getattr(scan.task, "task_runner_task", None) if scan.task else None
)
if task_result:
_revoke_task(task_result, terminate=False)

reason = "Scan never started — cleaned up by periodic task"
if _cleanup_scheduled_scan(scan, task_result, reason):
cleaned_up.append(str(scan.id))

return cleaned_up


def _is_worker_alive(worker: str) -> bool:

@@ -98,12 +160,17 @@ def _is_worker_alive(worker: str) -> bool:
return True


def _revoke_task(task_result) -> None:
"""Send `SIGTERM` to a hung Celery task. Non-fatal on failure."""
def _revoke_task(task_result, terminate: bool = True) -> None:
"""Revoke a Celery task. Non-fatal on failure.

`terminate=True` SIGTERMs the worker if the task is mid-execution; use
for EXECUTING cleanup. `terminate=False` only marks the task id revoked
across workers, so any worker pulling the queued message discards it;
use for SCHEDULED cleanup where the task hasn't run yet.
"""
try:
current_app.control.revoke(
task_result.task_id, terminate=True, signal="SIGTERM"
)
kwargs = {"terminate": True, "signal": "SIGTERM"} if terminate else {}
current_app.control.revoke(task_result.task_id, **kwargs)
logger.info(f"Revoked task {task_result.task_id}")
except Exception:
logger.exception(f"Failed to revoke task {task_result.task_id}")

@@ -125,28 +192,64 @@ def _cleanup_scan(scan, task_result, reason: str) -> bool:
except Exception:
logger.exception(f"Failed to drop temp database {tmp_db_name}")

# 2. Lock row, verify still EXECUTING, mark FAILED — all atomic
with rls_transaction(str(scan.tenant_id)):
try:
fresh_scan = AttackPathsScan.objects.select_for_update().get(id=scan.id)
except AttackPathsScan.DoesNotExist:
logger.warning(f"Scan {scan_id_str} no longer exists, skipping")
return False
fresh_scan = _finalize_failed_scan(scan, StateChoices.EXECUTING, reason)
if fresh_scan is None:
return False

if fresh_scan.state != StateChoices.EXECUTING:
logger.info(f"Scan {scan_id_str} is now {fresh_scan.state}, skipping")
return False

_mark_scan_finished(fresh_scan, StateChoices.FAILED, {"global_error": reason})

# 3. Mark `TaskResult` as `FAILURE` (not RLS-protected, outside lock)
# Mark `TaskResult` as `FAILURE` (not RLS-protected, outside lock)
if task_result:
task_result.status = states.FAILURE
task_result.date_done = datetime.now(tz=timezone.utc)
task_result.save(update_fields=["status", "date_done"])

# 4. Recover graph_data_ready if provider data still exists
recover_graph_data_ready(fresh_scan)

logger.info(f"Cleaned up stale scan {scan_id_str}: {reason}")
return True


def _cleanup_scheduled_scan(scan, task_result, reason: str) -> bool:
"""
Clean up a `SCHEDULED` scan that never reached a worker.

Skips the temp Neo4j drop — the database is only created once the worker
enters `EXECUTING`, so dropping it here just produces noisy log output.

Returns `True` if the scan was actually cleaned up, `False` if skipped.
"""
scan_id_str = str(scan.id)

fresh_scan = _finalize_failed_scan(scan, StateChoices.SCHEDULED, reason)
if fresh_scan is None:
return False

if task_result:
task_result.status = states.FAILURE
task_result.date_done = datetime.now(tz=timezone.utc)
task_result.save(update_fields=["status", "date_done"])

logger.info(f"Cleaned up scheduled scan {scan_id_str}: {reason}")
return True


def _finalize_failed_scan(scan, expected_state: str, reason: str):
"""
Atomically lock the row, verify it's still in `expected_state`, and
mark it `FAILED`. Returns the locked row on success, `None` if the
row is gone or has already moved on.
"""
scan_id_str = str(scan.id)
with rls_transaction(str(scan.tenant_id)):
try:
fresh_scan = AttackPathsScan.objects.select_for_update().get(id=scan.id)
except AttackPathsScan.DoesNotExist:
logger.warning(f"Scan {scan_id_str} no longer exists, skipping")
return None

if fresh_scan.state != expected_state:
logger.info(f"Scan {scan_id_str} is now {fresh_scan.state}, skipping")
return None

_mark_scan_finished(fresh_scan, StateChoices.FAILED, {"global_error": reason})

return fresh_scan
@@ -67,25 +67,52 @@ def retrieve_attack_paths_scan(
return None


def set_attack_paths_scan_task_id(
tenant_id: str,
scan_pk: str,
task_id: str,
) -> None:
"""Persist the Celery `task_id` on the `AttackPathsScan` row.

Called at dispatch time (when `apply_async` returns) so the row carries
the task id even while still `SCHEDULED`. This lets the periodic
cleanup revoke queued messages for scans that never reached a worker.
"""
with rls_transaction(tenant_id):
ProwlerAPIAttackPathsScan.objects.filter(id=scan_pk).update(task_id=task_id)


def starting_attack_paths_scan(
attack_paths_scan: ProwlerAPIAttackPathsScan,
task_id: str,
cartography_config: CartographyConfig,
) -> None:
with rls_transaction(attack_paths_scan.tenant_id):
attack_paths_scan.task_id = task_id
attack_paths_scan.state = StateChoices.EXECUTING
attack_paths_scan.started_at = datetime.now(tz=timezone.utc)
attack_paths_scan.update_tag = cartography_config.update_tag
) -> bool:
"""Flip the row from `SCHEDULED` to `EXECUTING` atomically.

attack_paths_scan.save(
update_fields=[
"task_id",
"state",
"started_at",
"update_tag",
]
)
Returns `False` if the row is gone or has already moved past
`SCHEDULED` (e.g., periodic cleanup raced ahead and marked it
`FAILED` while the worker message was still in flight).
"""
with rls_transaction(attack_paths_scan.tenant_id):
try:
locked = ProwlerAPIAttackPathsScan.objects.select_for_update().get(
id=attack_paths_scan.id
)
except ProwlerAPIAttackPathsScan.DoesNotExist:
return False

if locked.state != StateChoices.SCHEDULED:
return False

locked.state = StateChoices.EXECUTING
locked.started_at = datetime.now(tz=timezone.utc)
locked.update_tag = cartography_config.update_tag
locked.save(update_fields=["state", "started_at", "update_tag"])

# Keep the in-memory object the caller is holding in sync.
attack_paths_scan.state = locked.state
attack_paths_scan.started_at = locked.started_at
attack_paths_scan.update_tag = locked.update_tag
return True


def _mark_scan_finished(
@@ -97,6 +97,19 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
)
attack_paths_scan = db_utils.retrieve_attack_paths_scan(tenant_id, scan_id)

# Idempotency guard: cleanup may have flipped this row to a terminal state
# while the message was still in flight. Bail out before touching state.
if attack_paths_scan and attack_paths_scan.state in (
StateChoices.FAILED,
StateChoices.COMPLETED,
StateChoices.CANCELLED,
):
logger.warning(
f"Attack Paths scan {attack_paths_scan.id} already in terminal "
f"state {attack_paths_scan.state}; skipping execution"
)
return {}

# Checks before starting the scan
if not cartography_ingestion_function:
ingestion_exceptions = {

@@ -114,12 +127,17 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:

else:
if not attack_paths_scan:
# Safety net for in-flight messages or direct task invocations; dispatcher normally pre-creates the row.
logger.warning(
f"No Attack Paths Scan found for scan {scan_id} and tenant {tenant_id}, let's create it then"
)
attack_paths_scan = db_utils.create_attack_paths_scan(
tenant_id, scan_id, prowler_api_provider.id
)
if attack_paths_scan and task_id:
db_utils.set_attack_paths_scan_task_id(
tenant_id, attack_paths_scan.id, task_id
)

tmp_database_name = graph_database.get_database_name(
attack_paths_scan.id, temporary=True

@@ -141,9 +159,13 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
)

# Starting the Attack Paths scan
db_utils.starting_attack_paths_scan(
attack_paths_scan, task_id, tenant_cartography_config
)
if not db_utils.starting_attack_paths_scan(
attack_paths_scan, tenant_cartography_config
):
logger.warning(
f"Attack Paths scan {attack_paths_scan.id} no longer in SCHEDULED state; cleanup likely raced ahead"
)
return {}

scan_t0 = time.perf_counter()
logger.info(
@@ -10,16 +10,29 @@ from typing import Any
|
||||
|
||||
import sentry_sdk
|
||||
from celery.utils.log import get_task_logger
|
||||
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE
|
||||
from config.env import env
|
||||
from config.settings.celery import CELERY_DEADLOCK_ATTEMPTS
|
||||
from django.db import IntegrityError, OperationalError
|
||||
from django.db.models import Case, Count, IntegerField, Max, Min, Prefetch, Q, Sum, When
|
||||
from django.db.models import (
|
||||
Case,
|
||||
Count,
|
||||
Exists,
|
||||
IntegerField,
|
||||
Max,
|
||||
Min,
|
||||
OuterRef,
|
||||
Prefetch,
|
||||
Q,
|
||||
Sum,
|
||||
When,
|
||||
)
|
||||
from django.utils import timezone as django_timezone
|
||||
from tasks.jobs.queries import (
|
||||
COMPLIANCE_UPSERT_PROVIDER_SCORE_SQL,
|
||||
COMPLIANCE_UPSERT_TENANT_SUMMARY_SQL,
|
||||
)
|
||||
from tasks.utils import CustomEncoder
|
||||
from tasks.utils import CustomEncoder, batched
|
||||
|
||||
from api.compliance import PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE
|
||||
from api.constants import SEVERITY_ORDER
|
||||
@@ -2069,3 +2082,169 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
"created": created_count,
"updated": updated_count,
}


def reset_ephemeral_resource_findings_count(tenant_id: str, scan_id: str) -> dict:
"""Zero failed_findings_count for resources missing from a completed full-scope scan.

Resources that exist in the database for the scan's provider but were not
touched by this scan are treated as ephemeral. We keep their historical
findings, but reset the denormalized counter that drives the Resources page
sort so they stop ranking at the top.

Skipped (no-op) when:
- The scan is not in COMPLETED state.
- The scan ran with any scoping filter in scanner_args (partial scope).

Query design (must scale to 500k+ resources per provider):
Phase 1 — collect ephemeral IDs with one anti-join read.
Outer filter ``(tenant_id, provider_id, failed_findings_count > 0)``
uses ``resources_tenant_provider_idx``. The correlated
``NOT EXISTS`` subquery hits the implicit unique index
``(tenant_id, scan_id, resource_id)`` on ``ResourceScanSummary``.
``NOT EXISTS`` (vs ``NOT IN``) is null-safe and lets the planner
choose between hash anti-join and indexed nested-loop anti-join.
``.iterator(chunk_size=...)`` skips the queryset cache so memory
stays bounded while streaming UUIDs.
Phase 2 — UPDATE in fixed-size batches.
One large UPDATE would hold row-exclusive locks for seconds and
create a WAL spike. Batched UPDATEs by ``id__in`` (~1k rows each)
hit the primary key, keep each lock window ~50ms, bound WAL chunks,
and let other writers proceed between batches.
``failed_findings_count__gt=0`` in the UPDATE is idempotent under
concurrent scans and skips no-op rewrites.
Reads use the primary DB, not the replica: ``ResourceScanSummary`` rows
were written by the same scan task that triggered this one, so replica
lag could falsely classify scanned resources as ephemeral.

Scope detection (``Scan.is_full_scope()``) derives the set of scoping
scanner_args from ``prowler.lib.scan.scan.Scan.__init__`` via
introspection, so the API can never drift from the SDK's filter
contract. Imported scans are also rejected by trigger — they may only
cover a partial slice of resources.
"""
with rls_transaction(tenant_id):
scan = Scan.objects.filter(tenant_id=tenant_id, id=scan_id).first()

if scan is None:
logger.warning(f"Scan {scan_id} not found")
return {"status": "skipped", "reason": "scan not found"}

if scan.state != StateChoices.COMPLETED:
logger.info(f"Scan {scan_id} not completed; skipping ephemeral reset")
return {"status": "skipped", "reason": "scan not completed"}

if not scan.is_full_scope():
logger.info(
f"Scan {scan_id} ran with scoping filters; skipping ephemeral reset"
)
return {"status": "skipped", "reason": "partial scan scope"}

# Race protection: if a newer completed full-scope scan exists for this
# provider, our ResourceScanSummary set is stale relative to the resources'
# current failed_findings_count values (which the newer scan already
# refreshed). Wiping based on the older scan would zero counts the newer
# scan just set. Skip and let the newer scan's reset task do the work; if
# this task was delayed in the queue, that's the correct outcome.
# `completed_at__isnull=False` is required: Postgres orders NULL first in
# DESC, so a sibling COMPLETED scan with a missing completed_at would sort
# as "newest" and incorrectly cause us to skip.
with rls_transaction(tenant_id):
latest_full_scope_scan_id = (
Scan.objects.filter(
tenant_id=tenant_id,
provider_id=scan.provider_id,
state=StateChoices.COMPLETED,
completed_at__isnull=False,
)
.order_by("-completed_at", "-inserted_at")
.values_list("id", flat=True)
.first()
)
if latest_full_scope_scan_id != scan.id:
logger.info(
f"Scan {scan_id} is not the latest completed scan for provider "
f"{scan.provider_id}; skipping ephemeral reset"
)
return {"status": "skipped", "reason": "newer scan exists"}

# Defensive gate: ResourceScanSummary rows are written by perform_prowler_scan
# via best-effort bulk_create. If those writes failed silently (or the scan
# genuinely produced resources but no summaries were persisted), the
# ~Exists(in_scan) anti-join below would classify EVERY resource for this
# provider as ephemeral and zero their counts. Bail loudly instead.
with rls_transaction(tenant_id):
summaries_present = ResourceScanSummary.objects.filter(
tenant_id=tenant_id, scan_id=scan_id
).exists()
if scan.unique_resource_count > 0 and not summaries_present:
logger.error(
f"Scan {scan_id} reports {scan.unique_resource_count} unique "
f"resources but no ResourceScanSummary rows are persisted; "
f"skipping ephemeral reset to avoid wiping valid counts"
)
return {"status": "skipped", "reason": "summaries missing"}

# Stays on the primary DB intentionally. ResourceScanSummary rows are
# written by perform_prowler_scan in the same chain that triggered this
# task, so replica lag could return an empty/partial summary set; a stale
# read here would classify every Resource as ephemeral and wipe valid
# failed_findings_count values on the primary. Same rationale as
# update_provider_compliance_scores below in this module.
# Materializing the ID list (rather than streaming the iterator into
# batched UPDATEs) is intentional: it lets the UPDATEs run in their own
# short rls_transactions instead of one long transaction holding row locks
# on every batch. At 500k UUIDs the peak memory is ~40 MB — acceptable for
# a Celery worker — and is the better trade-off versus a multi-second
# write-lock window blocking concurrent scans.
with rls_transaction(tenant_id):
in_scan = ResourceScanSummary.objects.filter(
tenant_id=tenant_id,
scan_id=scan_id,
resource_id=OuterRef("pk"),
)
ephemeral_ids = list(
Resource.objects.filter(
tenant_id=tenant_id,
provider_id=scan.provider_id,
failed_findings_count__gt=0,
)
.filter(~Exists(in_scan))
.values_list("id", flat=True)
.iterator(chunk_size=DJANGO_FINDINGS_BATCH_SIZE)
)

if not ephemeral_ids:
logger.info(f"No ephemeral resources for scan {scan_id}")
return {
"status": "completed",
"scan_id": str(scan_id),
"provider_id": str(scan.provider_id),
"reset": 0,
}

total_updated = 0
for batch, _ in batched(ephemeral_ids, DJANGO_FINDINGS_BATCH_SIZE):
# batched() always yields a final tuple, which is empty when the input
# length is an exact multiple of the batch size. Skip it so we don't
# issue a no-op UPDATE ... WHERE id IN ().
if not batch:
continue
with rls_transaction(tenant_id):
total_updated += Resource.objects.filter(
tenant_id=tenant_id,
id__in=batch,
failed_findings_count__gt=0,
).update(failed_findings_count=0)

logger.info(
f"Ephemeral resource reset for scan {scan_id}: "
f"{total_updated} resources zeroed for provider {scan.provider_id}"
)

return {
"status": "completed",
"scan_id": str(scan_id),
"provider_id": str(scan.provider_id),
"reset": total_updated,
}
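A minimal sketch of the introspection-driven scope detection the docstring above describes, assuming the SDK keeps its scoping filters as keyword arguments on `prowler.lib.scan.scan.Scan.__init__`. The stand-in class and its kwarg names are illustrative only; the `self`/`provider` exclusion mirrors the test for `Scan.get_scoping_scanner_arg_keys()` included later in this diff:

```python
import inspect


class _FakeProwlerScan:
    # Sketch only: stands in for prowler.lib.scan.scan.Scan, whose real
    # __init__ signature is the source of truth for scoping kwargs.
    def __init__(self, provider, checks=None, services=None, severities=None):
        ...


def get_scoping_scanner_arg_keys(scan_cls=_FakeProwlerScan) -> tuple[str, ...]:
    # Every __init__ kwarg except `self` and `provider` is treated as a
    # scoping filter; a scan is "full scope" only when none of them is set.
    return tuple(
        name
        for name in inspect.signature(scan_cls.__init__).parameters
        if name not in ("self", "provider")
    )


def is_full_scope(scanner_args: dict | None) -> bool:
    scanner_args = scanner_args or {}
    return not any(scanner_args.get(key) for key in get_scoping_scanner_arg_keys())


assert get_scoping_scanner_arg_keys() == ("checks", "services", "severities")
assert is_full_scope({"services": []}) is True   # empty filters do not scope
assert is_full_scope({"checks": ["x"]}) is False
```

Deriving the key set from the signature means a filter added to the SDK later automatically becomes a scoping key on the API side without a separate hard-coded list.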
@@ -58,6 +58,7 @@ from tasks.jobs.scan import (
aggregate_findings,
create_compliance_requirements,
perform_prowler_scan,
reset_ephemeral_resource_findings_count,
update_provider_compliance_scores,
)
from tasks.utils import (
@@ -77,6 +78,7 @@ from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.generic.generic import GenericCompliance
from prowler.lib.outputs.finding import Finding as FindingOutput


logger = get_task_logger(__name__)


@@ -158,6 +160,13 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
generate_outputs_task.si(
scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
),
# post-scan task — runs in the parallel group so a
# failure cannot cascade into reports or integrations. Its only
# prerequisite is that perform_prowler_scan has committed
# ResourceScanSummary, which is true by the time this chain fires.
reset_ephemeral_resource_findings_count_task.si(
tenant_id=tenant_id, scan_id=scan_id
),
),
group(
# Use optimized task that generates both reports with shared queries
@@ -173,10 +182,25 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
).apply_async()

if can_provider_run_attack_paths_scan(tenant_id, provider_id):
perform_attack_paths_scan_task.apply_async(
# Row is normally created upstream, so this is a safeguard so we can attach the task id below
attack_paths_scan = attack_paths_db_utils.retrieve_attack_paths_scan(
tenant_id, scan_id
)
if attack_paths_scan is None:
attack_paths_scan = attack_paths_db_utils.create_attack_paths_scan(
tenant_id, scan_id, provider_id
)

# Persist the Celery task id so the periodic cleanup can revoke scans stuck in SCHEDULED
result = perform_attack_paths_scan_task.apply_async(
kwargs={"tenant_id": tenant_id, "scan_id": scan_id}
)

if attack_paths_scan and result:
attack_paths_db_utils.set_attack_paths_scan_task_id(
tenant_id, attack_paths_scan.id, result.task_id
)


@shared_task(base=RLSTask, name="provider-connection-check")
@set_tenant
@@ -378,7 +402,8 @@ class AttackPathsScanRLSTask(RLSTask):
SDK initialization, or Neo4j configuration errors during setup).
"""

def on_failure(self, exc, task_id, args, kwargs, _einfo):
def on_failure(self, exc, task_id, args, kwargs, _einfo): # noqa: ARG002
del args # Required by Celery's Task.on_failure signature; not used.
tenant_id = kwargs.get("tenant_id")
scan_id = kwargs.get("scan_id")

@@ -775,6 +800,32 @@ def aggregate_daily_severity_task(tenant_id: str, scan_id: str):
return aggregate_daily_severity(tenant_id=tenant_id, scan_id=scan_id)


@shared_task(name="scan-reset-ephemeral-resources", queue="overview")
@handle_provider_deletion
def reset_ephemeral_resource_findings_count_task(tenant_id: str, scan_id: str):
"""Reset failed_findings_count for resources missing from a completed full-scope scan.

Failures are swallowed and returned as a status: this task lives inside the
post-scan group, and Celery propagates group-member exceptions into the next
chain step — meaning a crash here would block compliance reports and
integrations. The reset is purely cosmetic (UI sort optimization), so a
bad run is logged and absorbed rather than allowed to cascade.
"""
try:
return reset_ephemeral_resource_findings_count(
tenant_id=tenant_id, scan_id=scan_id
)
except Exception as exc: # noqa: BLE001 — intentionally broad
logger.exception(
f"reset_ephemeral_resource_findings_count failed for scan {scan_id}: {exc}"
)
return {
"status": "failed",
"scan_id": str(scan_id),
"reason": str(exc),
}


@shared_task(base=RLSTask, name="scan-finding-group-summaries", queue="overview")
@set_tenant(keep_tenant=True)
@handle_provider_deletion
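A non-executing sketch of the canvas shape that motivates the swallow-and-return behaviour documented above. The task bodies and broker URL are placeholders: because `chain(group(...), callback)` runs as a chord, an exception raised by any group member prevents the next step from running normally, so the reset task converts failures into a status dict instead of raising.

```python
from celery import Celery, chain, group

app = Celery("sketch", broker="memory://")


@app.task
def reset_ephemeral(scan_id):
    try:
        # ... do the reset ...
        return {"status": "completed", "scan_id": scan_id}
    except Exception as exc:  # swallow: raising here would block the chord callback
        return {"status": "failed", "scan_id": scan_id, "reason": str(exc)}


@app.task
def generate_reports(scan_id):
    return {"reports": True}


@app.task
def finish(results):
    return results


# chain(group(...), finish) executes as a chord; `finish` only runs normally
# once every member of the group has completed without raising.
workflow = chain(group(reset_ephemeral.s("scan-1"), generate_reports.s("scan-1")), finish.s())
```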
@@ -135,7 +135,7 @@ class TestAttackPathsRun:
|
||||
assert result == ingestion_result
|
||||
mock_retrieve_scan.assert_called_once_with(str(tenant.id), str(scan.id))
|
||||
mock_starting.assert_called_once()
|
||||
config = mock_starting.call_args[0][2]
|
||||
config = mock_starting.call_args[0][1]
|
||||
assert config.neo4j_database == "tenant-db"
|
||||
mock_get_db_name.assert_has_calls(
|
||||
[call(attack_paths_scan.id, temporary=True), call(provider.tenant_id)]
|
||||
@@ -2732,3 +2732,143 @@ class TestCleanupStaleAttackPathsScans:
|
||||
assert result["cleaned_up_count"] == 2
|
||||
# Worker should be pinged exactly once — cache prevents second ping
|
||||
mock_alive.assert_called_once_with("shared-worker@host")
|
||||
|
||||
# `SCHEDULED` state cleanup
|
||||
def _create_scheduled_scan(
|
||||
self,
|
||||
tenant,
|
||||
provider,
|
||||
*,
|
||||
age_minutes,
|
||||
parent_state,
|
||||
with_task=True,
|
||||
):
|
||||
"""Create a SCHEDULED AttackPathsScan with a parent Scan in `parent_state`.
|
||||
|
||||
`age_minutes` controls how far in the past `started_at` is set, so
|
||||
callers can place rows safely past the cleanup cutoff.
|
||||
"""
|
||||
parent_scan = Scan.objects.create(
|
||||
name="Parent Prowler scan",
|
||||
provider=provider,
|
||||
trigger=Scan.TriggerChoices.MANUAL,
|
||||
state=parent_state,
|
||||
tenant_id=tenant.id,
|
||||
)
|
||||
|
||||
ap_scan = AttackPathsScan.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=provider,
|
||||
scan=parent_scan,
|
||||
state=StateChoices.SCHEDULED,
|
||||
started_at=datetime.now(tz=timezone.utc) - timedelta(minutes=age_minutes),
|
||||
)
|
||||
|
||||
task_result = None
|
||||
if with_task:
|
||||
task_result = TaskResult.objects.create(
|
||||
task_id=str(ap_scan.id),
|
||||
task_name="attack-paths-scan-perform",
|
||||
status="PENDING",
|
||||
)
|
||||
task = Task.objects.create(
|
||||
id=task_result.task_id,
|
||||
task_runner_task=task_result,
|
||||
tenant_id=tenant.id,
|
||||
)
|
||||
ap_scan.task = task
|
||||
ap_scan.save(update_fields=["task_id"])
|
||||
|
||||
return ap_scan, task_result
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._revoke_task")
|
||||
def test_cleans_up_scheduled_scan_when_parent_is_terminal(
|
||||
self,
|
||||
mock_revoke,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
ap_scan, task_result = self._create_scheduled_scan(
|
||||
tenant,
|
||||
provider,
|
||||
age_minutes=24 * 60 * 3, # 3 days, safely past any threshold
|
||||
parent_state=StateChoices.FAILED,
|
||||
)
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 1
|
||||
assert str(ap_scan.id) in result["scan_ids"]
|
||||
|
||||
ap_scan.refresh_from_db()
|
||||
assert ap_scan.state == StateChoices.FAILED
|
||||
assert ap_scan.progress == 100
|
||||
assert ap_scan.completed_at is not None
|
||||
assert ap_scan.ingestion_exceptions == {
|
||||
"global_error": "Scan never started — cleaned up by periodic task"
|
||||
}
|
||||
|
||||
# SCHEDULED revoke must NOT terminate a running worker
|
||||
mock_revoke.assert_called_once()
|
||||
assert mock_revoke.call_args.kwargs == {"terminate": False}
|
||||
|
||||
# Temp DB never created for SCHEDULED, so no drop attempted
|
||||
mock_drop_db.assert_not_called()
|
||||
# Tenant Neo4j data is untouched in this path
|
||||
mock_recover.assert_not_called()
|
||||
|
||||
task_result.refresh_from_db()
|
||||
assert task_result.status == "FAILURE"
|
||||
assert task_result.date_done is not None
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._revoke_task")
|
||||
def test_skips_scheduled_scan_when_parent_still_in_flight(
|
||||
self,
|
||||
mock_revoke,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
ap_scan, _ = self._create_scheduled_scan(
|
||||
tenant,
|
||||
provider,
|
||||
age_minutes=24 * 60 * 3,
|
||||
parent_state=StateChoices.EXECUTING,
|
||||
)
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 0
|
||||
|
||||
ap_scan.refresh_from_db()
|
||||
assert ap_scan.state == StateChoices.SCHEDULED
|
||||
mock_revoke.assert_not_called()
|
||||
|
||||
@@ -24,6 +24,7 @@ from tasks.jobs.scan import (
|
||||
aggregate_findings,
|
||||
create_compliance_requirements,
|
||||
perform_prowler_scan,
|
||||
reset_ephemeral_resource_findings_count,
|
||||
update_provider_compliance_scores,
|
||||
)
|
||||
from tasks.utils import CustomEncoder
|
||||
@@ -35,6 +36,7 @@ from api.models import (
|
||||
MuteRule,
|
||||
Provider,
|
||||
Resource,
|
||||
ResourceScanSummary,
|
||||
Scan,
|
||||
ScanSummary,
|
||||
StateChoices,
|
||||
@@ -4335,3 +4337,315 @@ class TestUpdateProviderComplianceScores:
|
||||
assert any("provider_compliance_scores" in c for c in calls)
|
||||
assert any("tenant_compliance_summaries" in c for c in calls)
|
||||
assert any("pg_advisory_xact_lock" in c for c in calls)
|
||||
|
||||
|
||||
class TestScanIsFullScope:
|
||||
def _live_trigger(self):
|
||||
return Scan.TriggerChoices.MANUAL
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"scanner_args",
|
||||
[
|
||||
{},
|
||||
{"unrelated": "value"},
|
||||
{"checks": None},
|
||||
{"services": []},
|
||||
{"severities": ""},
|
||||
],
|
||||
)
|
||||
def test_full_scope_when_no_filters_present(self, scanner_args):
|
||||
scan = Scan(scanner_args=scanner_args, trigger=self._live_trigger())
|
||||
assert scan.is_full_scope() is True
|
||||
|
||||
def test_full_scope_covers_every_sdk_kwarg(self):
|
||||
# Lock the predicate to whatever ProwlerScan's __init__ exposes today.
|
||||
# If the SDK adds a new filter, this test still passes via the
|
||||
# introspection-driven derivation; if it adds a non-filter kwarg
|
||||
# (e.g. provider-like), keep the exclusion list in sync in models.py.
|
||||
from prowler.lib.scan.scan import Scan as ProwlerScan
|
||||
import inspect
|
||||
|
||||
expected = tuple(
|
||||
name
|
||||
for name in inspect.signature(ProwlerScan.__init__).parameters
|
||||
if name not in ("self", "provider")
|
||||
)
|
||||
assert Scan.get_scoping_scanner_arg_keys() == expected
|
||||
# Spot-check a few well-known filters survive the introspection.
|
||||
assert "checks" in expected
|
||||
assert "services" in expected
|
||||
assert "severities" in expected
|
||||
|
||||
def test_partial_scope_for_each_sdk_filter(self):
|
||||
for key in Scan.get_scoping_scanner_arg_keys():
|
||||
scan = Scan(scanner_args={key: ["x"]}, trigger=self._live_trigger())
|
||||
assert scan.is_full_scope() is False, f"{key} should mark scan as partial"
|
||||
|
||||
def test_imported_scan_is_never_full_scope(self):
|
||||
# Forward-defensive: any trigger outside LIVE_SCAN_TRIGGERS (e.g. a
|
||||
# future "imported" trigger) must never qualify, even with empty args.
|
||||
scan = Scan(scanner_args={}, trigger="imported")
|
||||
assert scan.is_full_scope() is False
|
||||
|
||||
def test_handles_none_scanner_args(self):
|
||||
scan = Scan(scanner_args=None, trigger=self._live_trigger())
|
||||
assert scan.is_full_scope() is True
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestResetEphemeralResourceFindingsCount:
|
||||
def _make_scan_summary(self, tenant_id, scan_id, resource):
|
||||
return ResourceScanSummary.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=scan_id,
|
||||
resource_id=resource.id,
|
||||
service=resource.service,
|
||||
region=resource.region,
|
||||
resource_type=resource.type,
|
||||
)
|
||||
|
||||
def test_resets_only_resources_missing_from_full_scope_scan(
|
||||
self, tenants_fixture, scans_fixture, providers_fixture, resources_fixture
|
||||
):
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, scan2, *_ = scans_fixture
|
||||
resource1, resource2, resource3 = resources_fixture
|
||||
|
||||
Resource.objects.filter(id=resource1.id).update(failed_findings_count=3)
|
||||
Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)
|
||||
Resource.objects.filter(id=resource3.id).update(failed_findings_count=7)
|
||||
|
||||
# Only resource1 was scanned in scan1; resource2 is ephemeral.
|
||||
self._make_scan_summary(tenant.id, scan1.id, resource1)
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "completed"
|
||||
assert result["reset"] == 1
|
||||
|
||||
resource1.refresh_from_db()
|
||||
resource2.refresh_from_db()
|
||||
resource3.refresh_from_db()
|
||||
|
||||
assert resource1.failed_findings_count == 3
|
||||
assert resource2.failed_findings_count == 0
|
||||
# Other provider's resource is never touched.
|
||||
assert resource3.failed_findings_count == 7
|
||||
|
||||
def test_skips_when_scan_not_completed(
|
||||
self, tenants_fixture, scans_fixture, resources_fixture
|
||||
):
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, *_ = scans_fixture
|
||||
resource1, resource2, _ = resources_fixture
|
||||
|
||||
Scan.objects.filter(id=scan1.id).update(state=StateChoices.EXECUTING)
|
||||
Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "skipped"
|
||||
assert result["reason"] == "scan not completed"
|
||||
|
||||
resource2.refresh_from_db()
|
||||
assert resource2.failed_findings_count == 5
|
||||
|
||||
def test_skips_when_scan_has_scoping_filters(
|
||||
self, tenants_fixture, scans_fixture, resources_fixture
|
||||
):
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, *_ = scans_fixture
|
||||
_, resource2, _ = resources_fixture
|
||||
|
||||
Scan.objects.filter(id=scan1.id).update(scanner_args={"checks": ["check1"]})
|
||||
Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "skipped"
|
||||
assert result["reason"] == "partial scan scope"
|
||||
|
||||
resource2.refresh_from_db()
|
||||
assert resource2.failed_findings_count == 5
|
||||
|
||||
def test_skips_when_scan_not_found(self, tenants_fixture):
|
||||
tenant, *_ = tenants_fixture
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(uuid.uuid4())
|
||||
)
|
||||
|
||||
assert result["status"] == "skipped"
|
||||
assert result["reason"] == "scan not found"
|
||||
|
||||
def test_skips_when_newer_scan_completed_for_same_provider(
|
||||
self, tenants_fixture, scans_fixture, providers_fixture, resources_fixture
|
||||
):
|
||||
# If a newer completed scan exists for the same provider, our
|
||||
# ResourceScanSummary set is stale relative to the resources' current
|
||||
# counts, and applying the diff would corrupt them.
|
||||
from datetime import timedelta
|
||||
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, *_ = scans_fixture
|
||||
provider, *_ = providers_fixture
|
||||
_, resource2, _ = resources_fixture
|
||||
|
||||
Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)
|
||||
|
||||
# Create a newer COMPLETED scan for the same provider, with an
|
||||
# explicit completed_at strictly after scan1's so ordering is
|
||||
# deterministic regardless of clock resolution.
|
||||
newer_completed_at = scan1.completed_at + timedelta(minutes=5)
|
||||
Scan.objects.create(
|
||||
name="Newer Scan",
|
||||
provider=provider,
|
||||
trigger=Scan.TriggerChoices.MANUAL,
|
||||
state=StateChoices.COMPLETED,
|
||||
tenant_id=tenant.id,
|
||||
started_at=newer_completed_at,
|
||||
completed_at=newer_completed_at,
|
||||
)
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "skipped"
|
||||
assert result["reason"] == "newer scan exists"
|
||||
|
||||
resource2.refresh_from_db()
|
||||
assert resource2.failed_findings_count == 5
|
||||
|
||||
def test_does_not_touch_other_providers_resources(
|
||||
self, tenants_fixture, scans_fixture, providers_fixture, resources_fixture
|
||||
):
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, *_ = scans_fixture
|
||||
_, _, resource3 = resources_fixture
|
||||
|
||||
# resource3 belongs to provider2 with failed_findings_count > 0 and is
|
||||
# not in scan1's summary. It MUST NOT be reset.
|
||||
Resource.objects.filter(id=resource3.id).update(failed_findings_count=9)
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "completed"
|
||||
assert result["reset"] == 0
|
||||
|
||||
resource3.refresh_from_db()
|
||||
assert resource3.failed_findings_count == 9
|
||||
|
||||
def test_resources_already_zero_are_not_rewritten(
|
||||
self, tenants_fixture, scans_fixture, resources_fixture
|
||||
):
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, *_ = scans_fixture
|
||||
resource1, resource2, _ = resources_fixture
|
||||
|
||||
# Both resources already at 0, neither in summary -> nothing to update.
|
||||
Resource.objects.filter(id=resource1.id).update(failed_findings_count=0)
|
||||
Resource.objects.filter(id=resource2.id).update(failed_findings_count=0)
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "completed"
|
||||
assert result["reset"] == 0
|
||||
|
||||
def test_skips_when_summaries_missing_for_scan_with_resources(
|
||||
self, tenants_fixture, scans_fixture, resources_fixture
|
||||
):
|
||||
# Catastrophic guard: if a scan reports unique_resource_count > 0 but
|
||||
# no ResourceScanSummary rows are persisted (e.g. bulk_create silently
|
||||
# failed), the anti-join would classify EVERY resource as ephemeral
|
||||
# and zero their counts. The gate must skip and preserve the data.
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, *_ = scans_fixture
|
||||
resource1, resource2, _ = resources_fixture
|
||||
|
||||
Scan.objects.filter(id=scan1.id).update(unique_resource_count=10)
|
||||
Resource.objects.filter(id=resource1.id).update(failed_findings_count=3)
|
||||
Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "skipped"
|
||||
assert result["reason"] == "summaries missing"
|
||||
|
||||
resource1.refresh_from_db()
|
||||
resource2.refresh_from_db()
|
||||
assert resource1.failed_findings_count == 3
|
||||
assert resource2.failed_findings_count == 5
|
||||
|
||||
def test_ignores_sibling_scan_with_null_completed_at(
|
||||
self, tenants_fixture, scans_fixture, providers_fixture, resources_fixture
|
||||
):
|
||||
# Postgres orders NULL first in DESC; a sibling COMPLETED scan with a
|
||||
# missing completed_at must not be treated as the latest scan and
|
||||
# cause us to incorrectly skip the reset.
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, *_ = scans_fixture
|
||||
provider, *_ = providers_fixture
|
||||
resource1, resource2, _ = resources_fixture
|
||||
|
||||
Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)
|
||||
self._make_scan_summary(tenant.id, scan1.id, resource1)
|
||||
|
||||
Scan.objects.create(
|
||||
name="Ghost Scan",
|
||||
provider=provider,
|
||||
trigger=Scan.TriggerChoices.MANUAL,
|
||||
state=StateChoices.COMPLETED,
|
||||
tenant_id=tenant.id,
|
||||
started_at=scan1.completed_at,
|
||||
completed_at=None,
|
||||
)
|
||||
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "completed"
|
||||
assert result["reset"] == 1
|
||||
|
||||
resource2.refresh_from_db()
|
||||
assert resource2.failed_findings_count == 0
|
||||
|
||||
def test_batches_updates_when_many_ephemeral_resources(
|
||||
self, tenants_fixture, scans_fixture, resources_fixture
|
||||
):
|
||||
# Forces multiple batches to confirm the chunked UPDATE path executes
|
||||
# cleanly and the count is the sum across batches.
|
||||
tenant, *_ = tenants_fixture
|
||||
scan1, *_ = scans_fixture
|
||||
resource1, resource2, _ = resources_fixture
|
||||
|
||||
Resource.objects.filter(id=resource1.id).update(failed_findings_count=2)
|
||||
Resource.objects.filter(id=resource2.id).update(failed_findings_count=4)
|
||||
|
||||
# No ResourceScanSummary -> both resource1 and resource2 are ephemeral.
|
||||
# Force a 1-row batch via the shared findings batch size knob.
|
||||
with patch("tasks.jobs.scan.DJANGO_FINDINGS_BATCH_SIZE", 1):
|
||||
result = reset_ephemeral_resource_findings_count(
|
||||
tenant_id=str(tenant.id), scan_id=str(scan1.id)
|
||||
)
|
||||
|
||||
assert result["status"] == "completed"
|
||||
assert result["reset"] == 2
|
||||
|
||||
resource1.refresh_from_db()
|
||||
resource2.refresh_from_db()
|
||||
assert resource1.failed_findings_count == 0
|
||||
assert resource2.failed_findings_count == 0
|
||||
|
||||
@@ -842,6 +842,72 @@ class TestScanCompleteTasks:
|
||||
# Attack Paths task should be skipped when provider cannot run it
|
||||
mock_attack_paths_task.assert_not_called()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"row_pre_existing",
|
||||
[True, False],
|
||||
ids=["row-pre-existing", "row-missing-fallback"],
|
||||
)
|
||||
@patch("tasks.tasks.aggregate_attack_surface_task.apply_async")
|
||||
@patch("tasks.tasks.chain")
|
||||
@patch("tasks.tasks.create_compliance_requirements_task.si")
|
||||
@patch("tasks.tasks.update_provider_compliance_scores_task.si")
|
||||
@patch("tasks.tasks.perform_scan_summary_task.si")
|
||||
@patch("tasks.tasks.generate_outputs_task.si")
|
||||
@patch("tasks.tasks.generate_compliance_reports_task.si")
|
||||
@patch("tasks.tasks.check_integrations_task.si")
|
||||
@patch("tasks.tasks.attack_paths_db_utils.set_attack_paths_scan_task_id")
|
||||
@patch("tasks.tasks.attack_paths_db_utils.create_attack_paths_scan")
|
||||
@patch("tasks.tasks.attack_paths_db_utils.retrieve_attack_paths_scan")
|
||||
@patch("tasks.tasks.perform_attack_paths_scan_task.apply_async")
|
||||
@patch("tasks.tasks.can_provider_run_attack_paths_scan", return_value=True)
|
||||
def test_scan_complete_dispatches_attack_paths_scan(
|
||||
self,
|
||||
_mock_can_run_attack_paths,
|
||||
mock_attack_paths_task,
|
||||
mock_retrieve,
|
||||
mock_create,
|
||||
mock_set_task_id,
|
||||
mock_check_integrations_task,
|
||||
mock_compliance_reports_task,
|
||||
mock_outputs_task,
|
||||
mock_scan_summary_task,
|
||||
mock_update_compliance_scores_task,
|
||||
mock_compliance_requirements_task,
|
||||
mock_chain,
|
||||
mock_attack_surface_task,
|
||||
row_pre_existing,
|
||||
):
|
||||
"""When a provider can run Attack Paths, dispatch must:
|
||||
1. Reuse the existing row or create one if missing.
|
||||
2. Call apply_async on the Attack Paths task.
|
||||
3. Persist the returned Celery task id on the row.
|
||||
"""
|
||||
existing_row = MagicMock(id="ap-scan-id")
|
||||
if row_pre_existing:
|
||||
mock_retrieve.return_value = existing_row
|
||||
else:
|
||||
mock_retrieve.return_value = None
|
||||
mock_create.return_value = existing_row
|
||||
|
||||
async_result = MagicMock(task_id="celery-task-id")
|
||||
mock_attack_paths_task.return_value = async_result
|
||||
|
||||
_perform_scan_complete_tasks("tenant-id", "scan-id", "provider-id")
|
||||
|
||||
mock_retrieve.assert_called_once_with("tenant-id", "scan-id")
|
||||
if row_pre_existing:
|
||||
mock_create.assert_not_called()
|
||||
else:
|
||||
mock_create.assert_called_once_with("tenant-id", "scan-id", "provider-id")
|
||||
|
||||
mock_attack_paths_task.assert_called_once_with(
|
||||
kwargs={"tenant_id": "tenant-id", "scan_id": "scan-id"}
|
||||
)
|
||||
|
||||
mock_set_task_id.assert_called_once_with(
|
||||
"tenant-id", "ap-scan-id", "celery-task-id"
|
||||
)
|
||||
|
||||
|
||||
class TestAttackPathsTasks:
|
||||
@staticmethod
|
||||
|
||||
@@ -121,8 +121,8 @@ To update the environment file:

Edit the `.env` file and change version values:

```env
PROWLER_UI_VERSION="5.24.0"
PROWLER_API_VERSION="5.24.0"
PROWLER_UI_VERSION="5.25.1"
PROWLER_API_VERSION="5.25.1"
```

<Note>

@@ -227,6 +227,7 @@ Assign administrative permissions by selecting from the following options:
| Manage Integrations | All | Add or modify the Prowler Integrations. |
| Manage Ingestions | Prowler Cloud | Allow or deny the ability to submit findings ingestion batches via the API. |
| Manage Billing | Prowler Cloud | Access and manage billing settings and subscription information. |
| Manage Alerts | Prowler Cloud | Create, edit, and delete alert rules and recipients. |

<Note>
The **Scope** column indicates where each permission applies. **All** means the permission is available in both Prowler Cloud and Self-Managed deployments. **Prowler Cloud** indicates permissions that are specific to [Prowler Cloud](https://cloud.prowler.com/sign-in).
@@ -241,3 +242,5 @@ The following permissions are available exclusively in **Prowler Cloud**:
**Manage Ingestions:** Submit and manage findings ingestion jobs via the API. Required to upload OCSF scan results using the `--push-to-cloud` CLI flag or the ingestion endpoints. See [Import Findings](/user-guide/tutorials/prowler-app-import-findings) for details.

**Manage Billing:** Access and manage billing settings, subscription plans, and payment methods.

**Manage Alerts:** Create, edit, and delete alert rules and recipients used to deliver scan-result digests via email.
@@ -2,6 +2,42 @@

All notable changes to the **Prowler SDK** are documented in this file.

## [5.26.0] (Prowler UNRELEASED)

### 🚀 Added

- `bedrock_guardrails_configured` check for AWS provider [(#10844)](https://github.com/prowler-cloud/prowler/pull/10844)
- Universal compliance pipeline integrated into the CLI: `--list-compliance` and `--list-compliance-requirements` show universal frameworks, and CSV plus OCSF outputs are generated for any framework declaring a `TableConfig` [(#10301)](https://github.com/prowler-cloud/prowler/pull/10301)
- ASD Essential Eight Maturity Model compliance framework for AWS (Maturity Level One, Nov 2023) [(#10808)](https://github.com/prowler-cloud/prowler/pull/10808)

### 🔄 Changed

- `route53_dangling_ip_subdomain_takeover` now also flags `CNAME` records pointing to S3 website endpoints whose buckets are missing from the account [(#10920)](https://github.com/prowler-cloud/prowler/pull/10920)
- Azure Network Watcher flow log checks now require workspace-backed Traffic Analytics for `network_flow_log_captured_sent` and align metadata with VNet-compatible flow log guidance [(#10645)](https://github.com/prowler-cloud/prowler/pull/10645)
- Azure compliance entries for legacy Network Watcher flow log controls now use retirement-aware guidance and point new deployments to VNet flow logs
- AWS CodeBuild service now batches `BatchGetProjects` and `BatchGetBuilds` calls per region (up to 100 items per call) to reduce API call volume and prevent throttling-induced false positives in `codebuild_project_not_publicly_accessible` [(#10639)](https://github.com/prowler-cloud/prowler/pull/10639)
- `display_compliance_table` dispatch switched from substring `in` checks to `startswith` to prevent false matches between similarly named frameworks (e.g. `cisa` vs `cis`); see the short example below [(#10301)](https://github.com/prowler-cloud/prowler/pull/10301)
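The framework names in this snippet are illustrative rather than taken from this diff; it only demonstrates the substring-versus-prefix mismatch the entry above refers to:

```python
# Illustrative names only: substring dispatch routes a "cisa"-prefixed
# framework to the CIS handler, while prefix dispatch does not.
frameworks = ["cis_4.0_aws", "cisa_scuba_aws"]

assert [f for f in frameworks if "cis" in f] == ["cis_4.0_aws", "cisa_scuba_aws"]
assert [f for f in frameworks if f.startswith("cis_")] == ["cis_4.0_aws"]
```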
### 🐞 Fixed

- AWS SDK test isolation: autouse `mock_aws` fixture and leak detector in `conftest.py` to prevent tests from hitting real AWS endpoints, with idempotent organization setup for tests calling `set_mocked_aws_provider` multiple times [(#10605)](https://github.com/prowler-cloud/prowler/pull/10605)
- AWS `boto` user agent extra is now applied to every client [(#10944)](https://github.com/prowler-cloud/prowler/pull/10944)

### 🔐 Security

- Parser-mismatch SSRF in image provider registry auth where crafted bearer-token realms and pagination links could force requests to internal addresses and leak credentials cross-origin [(#10945)](https://github.com/prowler-cloud/prowler/pull/10945)

---

## [5.25.1] (Prowler v5.25.1)

### 🐞 Fixed

- `KeyError` when generating compliance outputs after the CLI scan [(#10919)](https://github.com/prowler-cloud/prowler/pull/10919)
- Kubernetes OCSF `provider_uid` now uses the cluster name in in-cluster mode (so `--cluster-name` is correctly reflected in findings) and keeps the kubeconfig context in kubeconfig mode [(#10483)](https://github.com/prowler-cloud/prowler/pull/10483)

---

## [5.25.0] (Prowler v5.25.0)

### 🚀 Added
@@ -45,7 +45,7 @@ from prowler.lib.check.check import (
)
from prowler.lib.check.checks_loader import load_checks_to_execute
from prowler.lib.check.compliance import update_checks_metadata_with_compliance
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.check.compliance_models import (
Compliance,
get_bulk_compliance_frameworks_universal,
)
from prowler.lib.check.custom_checks_metadata import (
parse_custom_checks_metadata_file,
update_checks_metadata,
@@ -75,7 +78,10 @@ from prowler.lib.outputs.compliance.cis.cis_oraclecloud import OracleCloudCIS
from prowler.lib.outputs.compliance.cisa_scuba.cisa_scuba_googleworkspace import (
GoogleWorkspaceCISASCuBA,
)
from prowler.lib.outputs.compliance.compliance import display_compliance_table
from prowler.lib.outputs.compliance.compliance import (
display_compliance_table,
process_universal_compliance_frameworks,
)
from prowler.lib.outputs.compliance.csa.csa_alibabacloud import AlibabaCloudCSA
from prowler.lib.outputs.compliance.csa.csa_aws import AWSCSA
from prowler.lib.outputs.compliance.csa.csa_azure import AzureCSA
@@ -84,6 +90,9 @@ from prowler.lib.outputs.compliance.csa.csa_oraclecloud import OracleCloudCSA
from prowler.lib.outputs.compliance.ens.ens_aws import AWSENS
from prowler.lib.outputs.compliance.ens.ens_azure import AzureENS
from prowler.lib.outputs.compliance.ens.ens_gcp import GCPENS
from prowler.lib.outputs.compliance.essential_eight.essential_eight_aws import (
EssentialEightAWS,
)
from prowler.lib.outputs.compliance.generic.generic import GenericCompliance
from prowler.lib.outputs.compliance.iso27001.iso27001_aws import AWSISO27001
from prowler.lib.outputs.compliance.iso27001.iso27001_azure import AzureISO27001
@@ -235,6 +244,8 @@ def prowler():
# Load compliance frameworks
logger.debug("Loading compliance frameworks from .json files")

universal_frameworks = {}

# Skip compliance frameworks for external-tool providers
if provider not in EXTERNAL_TOOL_PROVIDERS:
bulk_compliance_frameworks = Compliance.get_bulk(provider)
@@ -242,6 +253,8 @@ def prowler():
bulk_checks_metadata = update_checks_metadata_with_compliance(
bulk_compliance_frameworks, bulk_checks_metadata
)
# Load universal compliance frameworks for new rendering pipeline
universal_frameworks = get_bulk_compliance_frameworks_universal(provider)

# Update checks metadata if the --custom-checks-metadata-file is present
custom_checks_metadata = None
@@ -254,12 +267,12 @@ def prowler():
)

if args.list_compliance:
print_compliance_frameworks(bulk_compliance_frameworks)
all_frameworks = {**bulk_compliance_frameworks, **universal_frameworks}
print_compliance_frameworks(all_frameworks)
sys.exit()
if args.list_compliance_requirements:
print_compliance_requirements(
bulk_compliance_frameworks, args.list_compliance_requirements
)
all_frameworks = {**bulk_compliance_frameworks, **universal_frameworks}
print_compliance_requirements(all_frameworks, args.list_compliance_requirements)
sys.exit()

# Load checks to execute
@@ -276,6 +289,7 @@ def prowler():
provider=provider,
list_checks=getattr(args, "list_checks", False)
or getattr(args, "list_checks_json", False),
universal_frameworks=universal_frameworks,
)

# if --list-checks-json, dump a json file and exit
@@ -624,9 +638,29 @@ def prowler():
)

# Compliance Frameworks
# Source the framework listing from the union of `bulk_compliance_frameworks`
# and `universal_frameworks` so universal-only frameworks (e.g.
# `prowler/compliance/csa_ccm_4.0.json`) — which `Compliance.get_bulk(provider)`
# does not load — still reach `process_universal_compliance_frameworks` below.
# The provider-specific block subtracts the names handled by the universal
# processor so the legacy per-provider handlers only see frameworks that the
# bulk loader actually resolved.
input_compliance_frameworks = set(output_options.output_modes).intersection(
get_available_compliance_frameworks(provider)
set(bulk_compliance_frameworks.keys()) | set(universal_frameworks.keys())
)

# ── Universal compliance frameworks (provider-agnostic) ──
universal_processed = process_universal_compliance_frameworks(
input_compliance_frameworks=input_compliance_frameworks,
universal_frameworks=universal_frameworks,
finding_outputs=finding_outputs,
output_directory=output_options.output_directory,
output_filename=output_options.output_filename,
provider=provider,
generated_outputs=generated_outputs,
)
input_compliance_frameworks -= universal_processed

if provider == "aws":
for compliance_name in input_compliance_frameworks:
if compliance_name.startswith("cis_"):
@@ -642,6 +676,18 @@ def prowler():
)
generated_outputs["compliance"].append(cis)
cis.batch_write_data_to_file()
elif compliance_name.startswith("essential_eight"):
filename = (
f"{output_options.output_directory}/compliance/"
f"{output_options.output_filename}_{compliance_name}.csv"
)
essential_eight = EssentialEightAWS(
findings=finding_outputs,
compliance=bulk_compliance_frameworks[compliance_name],
file_path=filename,
)
generated_outputs["compliance"].append(essential_eight)
essential_eight.batch_write_data_to_file()
elif compliance_name == "mitre_attack_aws":
# Generate MITRE ATT&CK Finding Object
filename = (
@@ -1396,6 +1442,9 @@ def prowler():
output_options.output_filename,
output_options.output_directory,
compliance_overview,
universal_frameworks=universal_frameworks,
provider=provider,
output_formats=args.output_formats,
)
if compliance_overview:
print(
@@ -6426,9 +6426,9 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"bedrock_agent_guardrail_enabled",
|
||||
"bedrock_guardrail_prompt_attack_filter_enabled",
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled",
|
||||
"bedrock_agent_guardrail_enabled"
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -6485,9 +6485,9 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"bedrock_agent_guardrail_enabled",
|
||||
"bedrock_guardrail_prompt_attack_filter_enabled",
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled",
|
||||
"bedrock_agent_guardrail_enabled"
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -6546,8 +6546,8 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled",
|
||||
"bedrock_agent_guardrail_enabled"
|
||||
"bedrock_agent_guardrail_enabled",
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -6606,8 +6606,8 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled",
|
||||
"bedrock_agent_guardrail_enabled"
|
||||
"bedrock_agent_guardrail_enabled",
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
File diff suppressed because it is too large
@@ -2894,6 +2894,7 @@
|
||||
"bedrock_agent_guardrail_enabled",
|
||||
"bedrock_guardrail_prompt_attack_filter_enabled",
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled",
|
||||
"bedrock_guardrails_configured",
|
||||
"bedrock_model_invocation_logging_enabled",
|
||||
"bedrock_model_invocation_logs_encryption_enabled",
|
||||
"cloudformation_stack_outputs_find_secrets",
|
||||
|
||||
@@ -2898,6 +2898,7 @@
|
||||
"bedrock_agent_guardrail_enabled",
|
||||
"bedrock_guardrail_prompt_attack_filter_enabled",
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled",
|
||||
"bedrock_guardrails_configured",
|
||||
"bedrock_model_invocation_logging_enabled",
|
||||
"bedrock_model_invocation_logs_encryption_enabled",
|
||||
"cloudformation_stack_outputs_find_secrets",
|
||||
|
||||
@@ -2276,9 +2276,9 @@
|
||||
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
|
||||
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting thegeneration of visual flow diagrams to aid with analyzing for lateral movement, etc.",
|
||||
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor.",
|
||||
"RemediationProcedure": "From Azure Portal 1. Navigate to Network Watcher. 2. Select NSG flow logs. 3. Select + Create. 4. Select the desired Subscription. 5. Select + Select NSG. 6. Select a network security group. 7. Click Confirm selection. 8. Select or create a new Storage Account. 9. Input the retention in days to retain the log. 10. Click Next. 11. Under Configuration, select Version 2. 12. If rich analytics are required, select Enable Traffic Analytics, a processing interval, and a Log Analytics Workspace. 13. Select Next. 14. Optionally add Tags. 15. Select Review + create. 16. Select Create. Warning The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
|
||||
"AuditProcedure": "From Azure Portal 1. Navigate to Network Watcher. 2. Select NSG flow logs 3. For each log you wish to audit select it from this view.",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "From Azure Portal Existing NSG flow logs can still be reviewed under Network Watcher > Flow logs. If you already have NSG flow logs configured, ensure they remain enabled and that Traffic Analytics sends data to a Log Analytics Workspace until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create Virtual network flow logs instead: 1. Navigate to Network Watcher. 2. Select Flow logs. 3. Select + Create. 4. Select the desired Subscription. 5. For Flow log type, select Virtual network. 6. Select + Select target resource. 7. Select a virtual network. 8. Click Confirm selection. 9. Select or create a new Storage Account. 10. Input the retention in days to retain the log. 11. Click Next. 12. Under Analytics, select Version 2, enable Traffic Analytics, and select a Log Analytics Workspace. 13. Select Next. 14. Optionally add Tags. 15. Select Review + create. 16. Select Create.",
|
||||
"AuditProcedure": "From Azure Portal 1. Navigate to Network Watcher. 2. Select Flow logs. 3. Review existing Network security group flow logs, if any remain, to ensure they are enabled and configured to send logs to a Log Analytics Workspace. 4. Review Virtual network flow logs for new or migrated coverage.",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
|
||||
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://docs.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation"
|
||||
}
|
||||
@@ -2702,9 +2702,9 @@
|
||||
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days.",
|
||||
"RationaleStatement": "Flow logs enable capturing information about IP traffic flowing in and out of network security groups. Logs can be used to check for anomalies and give insight into suspected breaches.",
|
||||
"ImpactStatement": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use.",
|
||||
"RemediationProcedure": "From Azure Portal 1. Go to Network Watcher 2. Select NSG flow logs blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure Status is set to On 5. Ensure Retention (days) setting greater than 90 days 6. Select your storage account in the Storage account field 7. Select Save From Azure CLI Enable the NSG flow logs and set the Retention (days) to greater than or equal to 90 days. az network watcher flow-log configure --nsg <NameorID of the Network Security Group> --enabled true --resource-group <resourceGroupName> --retention 91 -- storage-account <NameorID of the storage account to save flow logs>",
|
||||
"AuditProcedure": "From Azure Portal 1. Go to Network Watcher 2. Select NSG flow logs blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure Status is set to On 5. Ensure Retention (days) setting greater than 90 days From Azure CLI az network watcher flow-log show --resource-group <resourceGroup> --nsg <NameorID of the NetworkSecurityGroup> --query 'retentionPolicy' Ensure that enabled is set to true and days is set to greater then or equal to 90.",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "From Azure Portal Existing NSG flow logs can still be reviewed under Network Watcher > Flow logs. If you already have NSG flow logs configured, ensure Status is set to On and Retention (days) is set to 0, 90, or a number greater than 90 until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, configure Virtual network flow logs instead and set Retention days to 0, 90, or a number greater than 90. From Azure CLI Update an existing flow log retention policy with az network watcher flow-log update --location <location> --name <flow-log> --retention <number-of-days>.",
|
||||
"AuditProcedure": "From Azure Portal 1. Go to Network Watcher. 2. Select Flow logs. 3. Review existing Network security group flow logs, if any remain, and ensure Status is set to On and Retention (days) is set to 0, 90, or a number greater than 90. 4. Review Virtual network flow logs for new or migrated coverage. From Azure CLI az network watcher flow-log list --location <location> --query [*].[name,retentionPolicy,targetResourceId] Ensure each relevant flow log has retention days set to 0, 90, or a number greater than 90.",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
|
||||
"DefaultValue": "By default, Network Security Group Flow Logs are disabled.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview:https://docs.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest:https://docs.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-logging-threat-detection#lt-6-configure-log-storage-retention"
|
||||
}
|
||||
|
||||
@@ -2241,9 +2241,9 @@
|
||||
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
|
||||
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
|
||||
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor.",
|
||||
"RemediationProcedure": "**From Azure Portal** 1. Navigate to `Network Watcher`. 1. Select `NSG flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. Select `+ Select NSG`. 1. Select a network security group. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. Input the retention in days to retain the log. 1. Click `Next`. 1. Under `Configuration`, select `Version 2`. 1. If rich analytics are required, select `Enable Traffic Analytics`, a processing interval, and a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`. ***Warning*** The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
|
||||
"AuditProcedure": "**From Azure Portal** 1. Navigate to `Network Watcher`. 1. Select `NSG flow logs` 1. For each log you wish to audit select it from this view. **From Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "**From Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure they remain enabled and that `Traffic Analytics` sends data to a `Log Analytics Workspace` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create `Virtual network` flow logs instead: 1. Navigate to `Network Watcher`. 1. Select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Virtual network`. 1. Select `+ Select target resource`. 1. Select `Virtual network`. 1. Select a virtual network. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. Input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`.",
|
||||
"AuditProcedure": "**From Azure Portal** 1. Navigate to `Network Watcher`. 1. Select `Flow logs`. 1. Review existing `Network security group` flow logs, if any remain, to ensure they are enabled and configured to send logs to a `Log Analytics Workspace`. 1. Review `Virtual network` flow logs for new or migrated coverage. **From Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
|
||||
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation"
|
||||
}
|
||||
@@ -2627,9 +2627,9 @@
|
||||
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days.",
|
||||
"RationaleStatement": "Flow logs enable capturing information about IP traffic flowing in and out of network security groups. Logs can be used to check for anomalies and give insight into suspected breaches.",
|
||||
"ImpactStatement": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use.",
|
||||
"RemediationProcedure": "**From Azure Portal** 1. Go to `Network Watcher` 2. Select `NSG flow logs` blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure `Status` is set to `On` 5. Ensure `Retention (days)` setting `greater than 90 days` 6. Select your storage account in the `Storage account` field 7. Select `Save` **From Azure CLI** Enable the `NSG flow logs` and set the Retention (days) to greater than or equal to 90 days. ``` az network watcher flow-log configure --nsg <NameorID of the Network Security Group> --enabled true --resource-group <resourceGroupName> --retention 91 --storage-account <NameorID of the storage account to save flow logs> ```",
|
||||
"AuditProcedure": "**From Azure Portal** 1. Go to `Network Watcher` 2. Select `NSG flow logs` blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure `Status` is set to `On` 5. Ensure `Retention (days)` setting `greater than 90 days` **From Azure CLI** ``` az network watcher flow-log show --resource-group <resourceGroup> --nsg <NameorID of the NetworkSecurityGroup> --query 'retentionPolicy' ``` Ensure that `enabled` is set to `true` and `days` is set to `greater then or equal to 90`. **From Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "**From Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, configure `Virtual network` flow logs instead and set `Retention days` to `0`, `90`, or a number greater than `90`. **From Azure CLI** Update an existing flow log retention policy with: ``` az network watcher flow-log update --location <location> --name <flow-log> --retention <number-of-days> ```",
|
||||
"AuditProcedure": "**From Azure Portal** 1. Go to `Network Watcher`. 1. Select `Flow logs`. 1. Review existing `Network security group` flow logs, if any remain, and ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90`. 1. Review `Virtual network` flow logs for new or migrated coverage. **From Azure CLI** ``` az network watcher flow-log list --location <location> --query [*].[name,retentionPolicy,targetResourceId] ``` Ensure each relevant flow log has retention days set to `0`, `90`, or a number greater than `90`. **From Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
|
||||
"DefaultValue": "By default, Network Security Group Flow Logs are `disabled`.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview:https://docs.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-6-configure-log-storage-retention"
|
||||
}
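The retention audit above can be scripted. A minimal sketch (not part of the benchmark file), which shells out to the same `az network watcher flow-log list` call quoted in the audit procedure and flags flow logs whose retention is neither 0 (unlimited) nor at least 90 days; the location value is a placeholder and the `retentionPolicy` field names assume the CLI's JSON output shape referenced in the audit text:

```
import json
import subprocess


def non_compliant_flow_logs(location: str) -> list:
    """Flag flow logs whose retention is neither 0 (unlimited) nor >= 90 days."""
    # Same listing call and --query quoted in the audit procedure above.
    output = subprocess.check_output(
        [
            "az", "network", "watcher", "flow-log", "list",
            "--location", location,
            "--query", "[*].[name,retentionPolicy,targetResourceId]",
            "--output", "json",
        ]
    )
    failing = []
    for name, retention_policy, target in json.loads(output):
        days = (retention_policy or {}).get("days", 0)
        if days != 0 and days < 90:
            failing.append(f"{name} ({target}): retention {days} days")
    return failing
```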
|
||||
|
||||
@@ -2548,9 +2548,9 @@
|
||||
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
|
||||
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
|
||||
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace.This will increase the amount of data stored and used by Azure Monitor.",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal**1. Navigate to `Network Watcher`.1. Under `Logs`, select `Flow logs`.1. Select `+ Create`.1. Select the desired Subscription.1. For `Flow log type`, select `Network security group`.1. Select `+ Select target resource`.1. Select `Network security group`.1. Select a network security group.1. Click `Confirm selection`.1. Select or create a new Storage Account.1. If using a v2 storage account, input the retention in days to retain the log.1. Click `Next`.1. Under `Analytics`, for `Flow log version`, select `Version 2`.1. Check the box next to `Enable traffic analytics`.1. Select a processing interval.1. Select a `Log Analytics Workspace`.1. Select `Next`.1. Optionally add Tags.1. Select `Review + create`.1. Select `Create`.***Warning***The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
|
||||
"AuditProcedure": "**Audit from Azure Portal**1. Navigate to `Network Watcher`.1. Under `Logs`, select `Flow logs`.1. Click `Add filter`.1. From the `Filter` drop-down, select `Flow log type`.1. From the `Value` drop-down, check `Network security group` only.1. Click `Apply`.1. Ensure that at least one network security group flow log is listed and is configured to send logs to a `Log Analytics Workspace`.**Audit from Azure Policy**If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure.If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions- **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state'- **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group'- **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure they remain enabled and that `Traffic Analytics` sends data to a `Log Analytics Workspace` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create `Virtual network` flow logs instead: 1. Navigate to `Network Watcher`.1. Under `Logs`, select `Flow logs`.1. Select `+ Create`.1. Select the desired Subscription.1. For `Flow log type`, select `Virtual network`.1. Select `+ Select target resource`.1. Select `Virtual network`.1. Select a virtual network.1. Click `Confirm selection`.1. Select or create a new Storage Account.1. If using a v2 storage account, input the retention in days to retain the log.1. Click `Next`.1. Under `Analytics`, for `Flow log version`, select `Version 2`.1. Check the box next to `Enable traffic analytics`.1. Select a processing interval.1. Select a `Log Analytics Workspace`.1. Select `Next`.1. Optionally add Tags.1. Select `Review + create`.1. Select `Create`.",
|
||||
"AuditProcedure": "**Audit from Azure Portal**1. Navigate to `Network Watcher`.1. Under `Logs`, select `Flow logs`.1. Click `Add filter`.1. From the `Filter` drop-down, select `Flow log type`.1. Review existing `Network security group` flow logs, if any remain, to ensure they are enabled and configured to send logs to a `Log Analytics Workspace`.1. Review `Virtual network` flow logs for new or migrated coverage.**Audit from Azure Policy**If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure.If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions- **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state'- **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group'- **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
|
||||
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation"
|
||||
}
|
||||
@@ -2934,9 +2934,9 @@
|
||||
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days.",
|
||||
"RationaleStatement": "Flow logs enable capturing information about IP traffic flowing in and out of network security groups. Logs can be used to check for anomalies and give insight into suspected breaches.",
|
||||
"ImpactStatement": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use.",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal**1. Go to `Network Watcher`2. Select `NSG flow logs` blade in the Logs section3. Select each Network Security Group from the list4. Ensure `Status` is set to `On`5. Ensure `Retention (days)` setting `greater than 90 days`6. Select your storage account in the `Storage account` field7. Select `Save`**Remediate from Azure CLI**Enable the `NSG flow logs` and set the Retention (days) to greater than or equal to 90 days.```az network watcher flow-log configure --nsg <NameorID of the Network Security Group> --enabled true --resource-group <resourceGroupName> --retention 91 --storage-account <NameorID of the storage account to save flow logs>```",
|
||||
"AuditProcedure": "**Audit from Azure Portal**1. Go to `Network Watcher`2. Select `NSG flow logs` blade in the Logs section3. Select each Network Security Group from the list4. Ensure `Status` is set to `On`5. Ensure `Retention (days)` setting `greater than 90 days`**Audit from Azure CLI**```az network watcher flow-log show --resource-group <resourceGroup> --nsg <NameorID of the NetworkSecurityGroup> --query 'retentionPolicy'```Ensure that `enabled` is set to `true` and `days` is set to `greater then or equal to 90`.**Audit from Azure Policy**If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure.If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions- **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, configure `Virtual network` flow logs instead and set `Retention days` to `0`, `90`, or a number greater than `90`.**Remediate from Azure CLI**Update an existing flow log retention policy with:```az network watcher flow-log update --location <location> --name <flow-log> --retention <number-of-days>```",
|
||||
"AuditProcedure": "**Audit from Azure Portal**1. Go to `Network Watcher`.1. Select `Flow logs`.1. Review existing `Network security group` flow logs, if any remain, and ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90`.1. Review `Virtual network` flow logs for new or migrated coverage.**Audit from Azure CLI**```az network watcher flow-log list --location <location> --query [*].[name,retentionPolicy,targetResourceId]```Ensure each relevant flow log has retention days set to `0`, `90`, or a number greater than `90`.**Audit from Azure Policy**If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure.If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions- **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
|
||||
"DefaultValue": "By default, Network Security Group Flow Logs are `disabled`.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview:https://docs.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-6-configure-log-storage-retention"
|
||||
}
|
||||
|
||||
@@ -1302,9 +1302,9 @@
|
||||
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace. **Retirement Notice** On September 30, 2027, network security group (NSG) flow logs will be retired. Starting June 30, 2025, it will no longer be possible to create new NSG flow logs. Azure recommends migrating to virtual network flow logs. Review https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement for more information. For virtual network flow logs, consider applying the recommendation `Ensure that virtual network flow logs are captured and sent to Log Analytics` in this section.",
|
||||
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
|
||||
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor.",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Network security group`. 1. Select `+ Select target resource`. 1. Select `Network security group`. 1. Select a network security group. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. If using a v2 storage account, input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`. ***Warning*** The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
|
||||
"AuditProcedure": "**Audit from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Click `Add filter`. 1. From the `Filter` drop-down, select `Flow log type`. 1. From the `Value` drop-down, check `Network security group` only. 1. Click `Apply`. 1. Ensure that at least one network security group flow log is listed and is configured to send logs to a `Log Analytics Workspace`. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure they remain enabled and that `Traffic Analytics` sends data to a `Log Analytics Workspace` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create `Virtual network` flow logs instead: 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Virtual network`. 1. Select `+ Select target resource`. 1. Select `Virtual network`. 1. Select a virtual network. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. If using a v2 storage account, input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`.",
|
||||
"AuditProcedure": "**Audit from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Click `Add filter`. 1. From the `Filter` drop-down, select `Flow log type`. 1. Review existing `Network security group` flow logs, if any remain, to ensure they are enabled and configured to send logs to a `Log Analytics Workspace`. 1. Review `Virtual network` flow logs for new or migrated coverage. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies. For details, see the official announcement: https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation",
|
||||
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics."
|
||||
}
|
||||
@@ -1789,9 +1789,9 @@
|
||||
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days. **Retirement Notice** On September 30, 2027, network security group (NSG) flow logs will be retired. Starting June 30, 2025, it will no longer be possible to create new NSG flow logs. Azure recommends migrating to virtual network flow logs. Review https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement for more information. For virtual network flow logs, consider applying the recommendation `Ensure that virtual network flow log retention days is set to greater than or equal to 90` in this section.",
|
||||
"RationaleStatement": "Flow logs enable capturing information about IP traffic flowing in and out of network security groups. Logs can be used to check for anomalies and give insight into suspected breaches.",
|
||||
"ImpactStatement": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use.",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal** 1. Go to `Network Watcher` 2. Select `NSG flow logs` blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure `Status` is set to `On` 5. Ensure `Retention (days)` setting `greater than 90 days` 6. Select your storage account in the `Storage account` field 7. Select `Save` **Remediate from Azure CLI** Enable the `NSG flow logs` and set the Retention (days) to greater than or equal to 90 days. ``` az network watcher flow-log configure --nsg <NameorID of the Network Security Group> --enabled true --resource-group <resourceGroupName> --retention 91 --storage-account <NameorID of the storage account to save flow logs> ```",
|
||||
"AuditProcedure": "**Audit from Azure Portal** 1. Go to `Network Watcher` 2. Select `NSG flow logs` blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure `Status` is set to `On` 5. Ensure `Retention (days)` setting `greater than 90 days` **Audit from Azure CLI** ``` az network watcher flow-log show --resource-group <resourceGroup> --nsg <NameorID of the NetworkSecurityGroup> --query 'retentionPolicy' ``` Ensure that `enabled` is set to `true` and `days` is set to `greater then or equal to 90`. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, configure `Virtual network` flow logs instead and set `Retention days` to `0`, `90`, or a number greater than `90`. **Remediate from Azure CLI** Update an existing flow log retention policy with: ``` az network watcher flow-log update --location <location> --name <flow-log> --retention <number-of-days> ```",
|
||||
"AuditProcedure": "**Audit from Azure Portal** 1. Go to `Network Watcher`. 1. Select `Flow logs`. 1. Review existing `Network security group` flow logs, if any remain, and ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90`. 1. Review `Virtual network` flow logs for new or migrated coverage. **Audit from Azure CLI** ``` az network watcher flow-log list --location <location> --query [*].[name,retentionPolicy,targetResourceId] ``` Ensure each relevant flow log has retention days set to `0`, `90`, or a number greater than `90`. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies. For details, see the official announcement: https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview:https://docs.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-6-configure-log-storage-retention",
|
||||
"DefaultValue": "By default, Network Security Group Flow Logs are `disabled`."
|
||||
}
|
||||
|
||||
@@ -1292,9 +1292,9 @@
|
||||
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace. **Retirement Notice** On September 30, 2027, network security group (NSG) flow logs will be retired. Starting June 30, 2025, it will no longer be possible to create new NSG flow logs. Azure recommends migrating to virtual network flow logs. Review https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement for more information. For virtual network flow logs, consider applying the recommendation `Ensure that virtual network flow logs are captured and sent to Log Analytics` in this section.",
|
||||
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
|
||||
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor.",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Network security group`. 1. Select `+ Select target resource`. 1. Select `Network security group`. 1. Select a network security group. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. If using a v2 storage account, input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`. ***Warning*** The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
|
||||
"AuditProcedure": "**Audit from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Click `Add filter`. 1. From the `Filter` drop-down, select `Flow log type`. 1. From the `Value` drop-down, check `Network security group` only. 1. Click `Apply`. 1. Ensure that at least one network security group flow log is listed and is configured to send logs to a `Log Analytics Workspace`. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
|
||||
"AdditionalInformation": "",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure they remain enabled and that `Traffic Analytics` sends data to a `Log Analytics Workspace` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create `Virtual network` flow logs instead: 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Virtual network`. 1. Select `+ Select target resource`. 1. Select `Virtual network`. 1. Select a virtual network. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. If using a v2 storage account, input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`.",
|
||||
"AuditProcedure": "**Audit from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Click `Add filter`. 1. From the `Filter` drop-down, select `Flow log type`. 1. Review existing `Network security group` flow logs, if any remain, to ensure they are enabled and configured to send logs to a `Log Analytics Workspace`. 1. Review `Virtual network` flow logs for new or migrated coverage. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
|
||||
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies. For details, see the official announcement: https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement.",
|
||||
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation",
|
||||
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics."
|
||||
}
@@ -709,17 +709,17 @@
},
{
"Id": "3.1.8",
"Description": "Ensure that Network Security Group Flow logs are captured and sent to Log Analytics",
"Description": "Ensure that Network Watcher flow logs are captured and sent to Log Analytics",
"Checks": [
"network_flow_log_captured_sent"
],
"Attributes": [
{
"Title": "Network Security Group Flow logs are captured and sent to Log Analytics",
"Title": "Network Watcher flow logs are captured and sent to Log Analytics",
"Section": "3. Logging and Monitoring",
"SubSection": "3.1 Logging",
"AttributeDescription": "Ensure that network flow logs are collected and sent to a central Log Analytics workspace for monitoring and analysis.",
"AdditionalInformation": "Capturing network flow logs provides visibility into traffic patterns across your network, helping detect anomalies, potential lateral movement, and security threats. These logs integrate with Azure Monitor and Azure Sentinel, enabling advanced analytics and visualization for improved network security and incident response.",
"AttributeDescription": "Ensure that Network Watcher flow logs for supported targets, such as virtual networks and network security groups, are collected and sent to a central Log Analytics workspace for monitoring and analysis.",
"AdditionalInformation": "Capturing Network Watcher flow logs provides visibility into traffic patterns across your network, helping detect anomalies, potential lateral movement, and security threats. These logs integrate with Azure Monitor and Azure Sentinel, enabling advanced analytics and visualization for improved network security and incident response. For new deployments, prefer virtual network flow logs because NSG flow logs are on the retirement path.",
"LevelOfRisk": 4,
"Weight": 100
}
@@ -763,17 +763,17 @@
},
{
"Id": "3.2.1",
"Description": "Ensure that Network Security Group Flow Log retention period is 'greater than 90 days'",
"Description": "Ensure that Network Watcher flow log retention period is '0 or at least 90 days'",
"Checks": [
"network_flow_log_more_than_90_days"
],
"Attributes": [
{
"Title": "Network Security Group Flow Log retention period is 'greater than 90 days'",
"Title": "Network Watcher flow log retention period is '0 or at least 90 days'",
"Section": "3. Logging and Monitoring",
"SubSection": "3.2 Retention",
"AttributeDescription": "Enable Network Security Group (NSG) Flow Logs and configure the retention period to at least 90 days to capture and store IP traffic data for security monitoring and analysis.",
"AdditionalInformation": "NSG Flow Logs provide visibility into network traffic, helping detect anomalies, unauthorized access, and potential security breaches. Retaining logs for at least 90 days ensures that historical data is available for incident investigation, compliance, and forensic analysis, strengthening overall network security monitoring.",
"AttributeDescription": "Enable Network Watcher flow logs for supported targets, such as virtual networks and network security groups, and configure the retention period to 0 for unlimited retention or at least 90 days to capture and store IP traffic data for security monitoring and analysis.",
"AdditionalInformation": "Network Watcher flow logs provide visibility into network traffic, helping detect anomalies, unauthorized access, and potential security breaches. Retaining logs for 0 days (unlimited) or at least 90 days ensures that historical data is available for incident investigation, compliance, and forensic analysis, strengthening overall network security monitoring. For new deployments, prefer virtual network flow logs because NSG flow logs are on the retirement path.",
"LevelOfRisk": 3,
"Weight": 10
}

@@ -48,7 +48,7 @@ class _MutableTimestamp:

timestamp = _MutableTimestamp(datetime.today())
timestamp_utc = _MutableTimestamp(datetime.now(timezone.utc))
prowler_version = "5.25.0"
prowler_version = "5.26.0"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://raw.githubusercontent.com/prowler-cloud/prowler/dc7d2d5aeb92fdf12e8604f42ef6472cd3e8e889/docs/img/prowler-logo-black.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
@@ -87,8 +87,8 @@ def get_available_compliance_frameworks(provider=None):
providers = [p.value for p in Provider]
if provider:
providers = [provider]
for provider in providers:
compliance_dir = f"{actual_directory}/../compliance/{provider}"
for current_provider in providers:
compliance_dir = f"{actual_directory}/../compliance/{current_provider}"
if not os.path.isdir(compliance_dir):
continue
with os.scandir(compliance_dir) as files:
@@ -97,7 +97,9 @@ def get_available_compliance_frameworks(provider=None):
available_compliance_frameworks.append(
file.name.removesuffix(".json")
)
# Also scan top-level compliance/ for multi-provider JSONs
# Also scan top-level compliance/ for multi-provider (universal) JSONs.
# When a specific provider was requested, only include the framework if it
# declares support for that provider; otherwise include all universal frameworks.
compliance_root = f"{actual_directory}/../compliance"
if os.path.isdir(compliance_root):
with os.scandir(compliance_root) as files:

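One plausible reason for renaming the loop variable to `current_provider` in the hunk above: iterating with `for provider in providers:` rebinds the function's `provider` parameter, so later logic that needs to know whether a specific provider was requested (as the new top-level-scan comment describes) would see the last iterated value instead. A small, hypothetical illustration of that pitfall, not code from the repository:

```
def list_frameworks(provider=None):
    providers = ["aws", "azure", "gcp"] if provider is None else [provider]
    for provider in providers:  # rebinds the `provider` parameter
        pass  # ... collect per-provider frameworks ...
    # After the loop, `provider` always holds the last element iterated,
    # so a later "did the caller ask for a specific provider?" test is unreliable.
    return provider


print(list_frameworks())  # prints "gcp" even though no provider was requested
```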
@@ -299,12 +299,22 @@ def print_compliance_frameworks(
def print_compliance_requirements(
bulk_compliance_frameworks: dict, compliance_frameworks: list
):
from prowler.lib.check.compliance_models import ComplianceFramework

for compliance_framework in compliance_frameworks:
for key in bulk_compliance_frameworks.keys():
framework = bulk_compliance_frameworks[key].Framework
provider = bulk_compliance_frameworks[key].Provider
version = bulk_compliance_frameworks[key].Version
requirements = bulk_compliance_frameworks[key].Requirements
entry = bulk_compliance_frameworks[key]
is_universal = isinstance(entry, ComplianceFramework)
if is_universal:
framework = entry.framework
provider = entry.provider or "Multi-provider"
version = entry.version
requirements = entry.requirements
else:
framework = entry.Framework
provider = entry.Provider or "Multi-provider"
version = entry.Version
requirements = entry.Requirements
# We can list the compliance requirements for a given framework using the
# bulk_compliance_frameworks keys since they are the compliance specification file name
if compliance_framework == key:
@@ -313,10 +323,23 @@
)
for requirement in requirements:
checks = ""
for check in requirement.Checks:
checks += f" {Fore.YELLOW}\t\t{check}\n{Style.RESET_ALL}"
if is_universal:
req_checks = requirement.checks
req_id = requirement.id
req_description = requirement.description
else:
req_checks = requirement.Checks
req_id = requirement.Id
req_description = requirement.Description
if isinstance(req_checks, dict):
for prov, check_list in req_checks.items():
for check in check_list:
checks += f" {Fore.YELLOW}\t\t[{prov}] {check}\n{Style.RESET_ALL}"
else:
for check in req_checks:
checks += f" {Fore.YELLOW}\t\t{check}\n{Style.RESET_ALL}"
print(
f"Requirement Id: {Fore.MAGENTA}{requirement.Id}{Style.RESET_ALL}\n\t- Description: {requirement.Description}\n\t- Checks:\n{checks}"
f"Requirement Id: {Fore.MAGENTA}{req_id}{Style.RESET_ALL}\n\t- Description: {req_description}\n\t- Checks:\n{checks}"
)

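The branching above handles two requirement shapes: legacy per-provider frameworks expose PascalCase attributes with a flat `Checks` list, while universal frameworks expose snake_case attributes with `checks` keyed by provider. A minimal sketch of the two shapes the printer now accepts, using plain namespaces rather than the real Pydantic models and reusing the 3.1.8 requirement quoted earlier in this document:

```
from types import SimpleNamespace

# Legacy per-provider requirement: PascalCase fields and a flat list of check IDs.
legacy_requirement = SimpleNamespace(
    Id="3.1.8",
    Description="Ensure that Network Watcher flow logs are captured and sent to Log Analytics",
    Checks=["network_flow_log_captured_sent"],
)

# Universal (multi-provider) requirement: snake_case fields and checks keyed by provider.
universal_requirement = SimpleNamespace(
    id="3.1.8",
    description="Ensure that Network Watcher flow logs are captured and sent to Log Analytics",
    checks={"azure": ["network_flow_log_captured_sent"]},
)
```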
@@ -22,6 +22,7 @@ def load_checks_to_execute(
categories: set = None,
resource_groups: set = None,
list_checks: bool = False,
universal_frameworks: dict = None,
) -> set:
"""Generate the list of checks to execute based on the cloud provider and the input arguments given"""
try:
@@ -155,12 +156,21 @@
if not bulk_compliance_frameworks:
bulk_compliance_frameworks = Compliance.get_bulk(provider=provider)
for compliance_framework in compliance_frameworks:
checks_to_execute.update(
CheckMetadata.list(
bulk_compliance_frameworks=bulk_compliance_frameworks,
compliance_framework=compliance_framework,
# Try universal frameworks first (snake_case dict-keyed checks)
if (
universal_frameworks
and compliance_framework in universal_frameworks
):
fw = universal_frameworks[compliance_framework]
for req in fw.requirements:
checks_to_execute.update(req.checks.get(provider.lower(), []))
elif compliance_framework in bulk_compliance_frameworks:
checks_to_execute.update(
CheckMetadata.list(
bulk_compliance_frameworks=bulk_compliance_frameworks,
compliance_framework=compliance_framework,
)
)
)

# Handle if there are categories passed using --categories
elif categories:

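Under the new branch above, a universal framework contributes checks by looking up the current provider in each requirement's `checks` mapping; providers absent from the mapping contribute nothing. A rough sketch of that selection, assuming an illustrative framework object shaped like the one the loop expects (the second check ID is a placeholder):

```
from types import SimpleNamespace

# Hypothetical universal framework: each requirement keys its checks by provider.
universal_framework = SimpleNamespace(
    requirements=[
        SimpleNamespace(checks={"azure": ["network_flow_log_captured_sent"]}),
        SimpleNamespace(checks={"aws": ["some_aws_only_check"]}),
    ]
)

provider = "AZURE"
checks_to_execute = set()
for req in universal_framework.requirements:
    # Same lookup as in the hunk above.
    checks_to_execute.update(req.checks.get(provider.lower(), []))

print(checks_to_execute)  # {'network_flow_log_captured_sent'}
```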
@@ -102,6 +102,48 @@ class CIS_Requirement_Attribute(BaseModel):
References: str


class EssentialEight_Requirement_Attribute_MaturityLevel(str, Enum):
"""ASD Essential Eight Maturity Level"""

ML1 = "ML1"
ML2 = "ML2"
ML3 = "ML3"


class EssentialEight_Requirement_Attribute_AssessmentStatus(str, Enum):
"""Essential Eight Requirement Attribute Assessment Status"""

Manual = "Manual"
Automated = "Automated"


class EssentialEight_Requirement_Attribute_CloudApplicability(str, Enum):
"""How well the ASD control maps to AWS cloud infrastructure."""

Full = "full"
Partial = "partial"
Limited = "limited"
NonApplicable = "non-applicable"


# Essential Eight Requirement Attribute
class EssentialEight_Requirement_Attribute(BaseModel):
"""ASD Essential Eight Requirement Attribute"""

Section: str
MaturityLevel: EssentialEight_Requirement_Attribute_MaturityLevel
AssessmentStatus: EssentialEight_Requirement_Attribute_AssessmentStatus
CloudApplicability: EssentialEight_Requirement_Attribute_CloudApplicability
MitigatedThreats: list[str]
Description: str
RationaleStatement: str
ImpactStatement: str
RemediationProcedure: str
AuditProcedure: str
AdditionalInformation: str
References: str


# Well Architected Requirement Attribute
class AWS_Well_Architected_Requirement_Attribute(BaseModel):
"""AWS Well Architected Requirement Attribute"""
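A quick illustration of how the new Essential Eight attribute model above could be populated. Every value below is invented for the example; only the class and enum names come from the hunk above, and the reference URL is a placeholder:

```
attribute = EssentialEight_Requirement_Attribute(
    Section="Patch applications",
    MaturityLevel=EssentialEight_Requirement_Attribute_MaturityLevel.ML1,
    AssessmentStatus=EssentialEight_Requirement_Attribute_AssessmentStatus.Automated,
    CloudApplicability=EssentialEight_Requirement_Attribute_CloudApplicability.Partial,
    MitigatedThreats=["Exploitation of unpatched internet-facing services"],
    Description="Apply patches to internet-facing services within a defined timeframe.",
    RationaleStatement="Unpatched services are a common initial access vector.",
    ImpactStatement="Requires a regular patching cadence and maintenance windows.",
    RemediationProcedure="Enable automated patching for the affected services.",
    AuditProcedure="Review patch status reporting for internet-facing services.",
    AdditionalInformation="",
    References="https://www.cyber.gov.au/",  # placeholder reference
)
```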
@@ -250,6 +292,7 @@ class Compliance_Requirement(BaseModel):
Name: Optional[str] = None
Attributes: list[
Union[
EssentialEight_Requirement_Attribute,
CIS_Requirement_Attribute,
ENS_Requirement_Attribute,
ISO27001_2013_Requirement_Attribute,

@@ -1,12 +1,17 @@
import sys

from prowler.lib.check.models import Check_Report
from prowler.lib.logger import logger
from prowler.lib.outputs.compliance.c5.c5 import get_c5_table
from prowler.lib.outputs.compliance.ccc.ccc import get_ccc_table
from prowler.lib.outputs.compliance.cis.cis import get_cis_table
from prowler.lib.outputs.compliance.compliance_check import ( # noqa: F401 - re-export for backward compatibility
get_check_compliance,
)
from prowler.lib.outputs.compliance.csa.csa import get_csa_table
from prowler.lib.outputs.compliance.ens.ens import get_ens_table
from prowler.lib.outputs.compliance.essential_eight.essential_eight import (
get_essential_eight_table,
)
from prowler.lib.outputs.compliance.generic.generic_table import (
get_generic_compliance_table,
)
@@ -17,6 +22,94 @@ from prowler.lib.outputs.compliance.mitre_attack.mitre_attack import (
|
||||
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore import (
|
||||
get_prowler_threatscore_table,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.universal.universal_table import get_universal_table
|
||||
|
||||
|
||||
def process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks: set,
|
||||
universal_frameworks: dict,
|
||||
finding_outputs: list,
|
||||
output_directory: str,
|
||||
output_filename: str,
|
||||
provider: str,
|
||||
generated_outputs: dict,
|
||||
) -> set:
|
||||
"""Process universal compliance frameworks, generating CSV and OCSF outputs.
|
||||
|
||||
For each framework in *input_compliance_frameworks* that exists in
|
||||
*universal_frameworks* and has an outputs.table_config, this function
|
||||
creates both a CSV (UniversalComplianceOutput) and an OCSF JSON
|
||||
(OCSFComplianceOutput) file. OCSF is always generated regardless of
|
||||
the user's ``--output-formats`` flag.
|
||||
|
||||
The function is idempotent: it tracks already-created writers via
|
||||
``generated_outputs["compliance"]`` keyed by ``file_path``. If invoked
|
||||
again for the same framework (e.g. once per streaming batch), it
|
||||
reuses the existing writer instead of recreating it. This guarantees
|
||||
one output writer per framework for the whole execution and keeps
|
||||
the OCSF JSON array valid across multiple calls.
|
||||
|
||||
Returns the set of framework names that were processed so the caller
|
||||
can remove them before entering the legacy per-provider output loop.
|
||||
"""
|
||||
from prowler.lib.outputs.compliance.universal.ocsf_compliance import (
|
||||
OCSFComplianceOutput,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.universal.universal_output import (
|
||||
UniversalComplianceOutput,
|
||||
)
|
||||
|
||||
existing_writers = {
|
||||
getattr(out, "file_path", None): out
|
||||
for out in generated_outputs.get("compliance", [])
|
||||
if isinstance(out, (UniversalComplianceOutput, OCSFComplianceOutput))
|
||||
}
|
||||
|
||||
processed = set()
|
||||
for compliance_name in input_compliance_frameworks:
|
||||
if not (
|
||||
compliance_name in universal_frameworks
|
||||
and universal_frameworks[compliance_name].outputs
|
||||
and universal_frameworks[compliance_name].outputs.table_config
|
||||
):
|
||||
continue
|
||||
|
||||
fw = universal_frameworks[compliance_name]
|
||||
|
||||
# CSV output
|
||||
csv_path = (
|
||||
f"{output_directory}/compliance/" f"{output_filename}_{compliance_name}.csv"
|
||||
)
|
||||
if csv_path not in existing_writers:
|
||||
output = UniversalComplianceOutput(
|
||||
findings=finding_outputs,
|
||||
framework=fw,
|
||||
file_path=csv_path,
|
||||
provider=provider,
|
||||
)
|
||||
generated_outputs["compliance"].append(output)
|
||||
existing_writers[csv_path] = output
|
||||
output.batch_write_data_to_file()
|
||||
|
||||
# OCSF output (always generated for universal frameworks)
|
||||
ocsf_path = (
|
||||
f"{output_directory}/compliance/"
|
||||
f"{output_filename}_{compliance_name}.ocsf.json"
|
||||
)
|
||||
if ocsf_path not in existing_writers:
|
||||
ocsf_output = OCSFComplianceOutput(
|
||||
findings=finding_outputs,
|
||||
framework=fw,
|
||||
file_path=ocsf_path,
|
||||
provider=provider,
|
||||
)
|
||||
generated_outputs["compliance"].append(ocsf_output)
|
||||
existing_writers[ocsf_path] = ocsf_output
|
||||
ocsf_output.batch_write_data_to_file()
|
||||
|
||||
processed.add(compliance_name)
|
||||
|
||||
return processed
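A minimal sketch of how a caller might drive this helper across streaming batches; `finding_batches`, `universal_frameworks` and the framework names are illustrative placeholders, not part of this change. Writers created on the first call are reused on later ones, and the returned set lets the caller skip those frameworks in the legacy loop:

```python
generated_outputs = {"compliance": []}
requested = {"cis_5.0_aws", "mitre_attack_aws"}

for batch in finding_batches:  # hypothetical iterable of Finding lists
    processed = process_universal_compliance_frameworks(
        input_compliance_frameworks=requested,
        universal_frameworks=universal_frameworks,  # name -> ComplianceFramework
        finding_outputs=batch,
        output_directory="/tmp/prowler-output",
        output_filename="prowler-output",
        provider="aws",
        generated_outputs=generated_outputs,
    )

# Frameworks already rendered here are skipped by the legacy per-provider loop.
legacy_frameworks = requested - processed
```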
|
||||
|
||||
|
||||
def display_compliance_table(
|
||||
@@ -26,6 +119,9 @@ def display_compliance_table(
|
||||
output_filename: str,
|
||||
output_directory: str,
|
||||
compliance_overview: bool,
|
||||
universal_frameworks: dict = None,
|
||||
provider: str = None,
|
||||
output_formats: list = None,
|
||||
) -> None:
|
||||
"""
|
||||
display_compliance_table generates the compliance table for the given compliance framework.
|
||||
@@ -37,6 +133,9 @@ def display_compliance_table(
|
||||
output_filename (str): The output filename
|
||||
output_directory (str): The output directory
|
||||
compliance_overview (bool): Whether to display only the compliance overview
|
||||
universal_frameworks (dict): Optional universal ComplianceFramework objects
|
||||
provider (str): The current provider (e.g. "aws") for multi-provider filtering
|
||||
output_formats (list): The output formats to generate
|
||||
|
||||
Returns:
|
||||
None
|
||||
@@ -45,16 +144,24 @@ def display_compliance_table(
|
||||
findings = [f for f in findings if f.check_metadata.CheckID in bulk_checks_metadata]
|
||||
|
||||
try:
|
||||
if "ens_" in compliance_framework:
|
||||
get_ens_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
compliance_framework,
|
||||
output_filename,
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
elif "cis_" in compliance_framework:
|
||||
# Universal path: if the framework has TableConfig, use the universal renderer
|
||||
if universal_frameworks and compliance_framework in universal_frameworks:
|
||||
fw = universal_frameworks[compliance_framework]
|
||||
if fw.outputs and fw.outputs.table_config:
|
||||
get_universal_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
compliance_framework,
|
||||
output_filename,
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
framework=fw,
|
||||
provider=provider,
|
||||
output_formats=output_formats,
|
||||
)
|
||||
return
|
||||
|
||||
if compliance_framework.startswith("cis_"):
|
||||
get_cis_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
@@ -63,7 +170,16 @@ def display_compliance_table(
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
elif "mitre_attack" in compliance_framework:
|
||||
elif compliance_framework.startswith("ens_"):
|
||||
get_ens_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
compliance_framework,
|
||||
output_filename,
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
elif compliance_framework.startswith("mitre_attack"):
|
||||
get_mitre_attack_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
@@ -72,7 +188,7 @@ def display_compliance_table(
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
elif "kisa_isms_" in compliance_framework:
|
||||
elif compliance_framework.startswith("kisa"):
|
||||
get_kisa_ismsp_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
@@ -81,7 +197,7 @@ def display_compliance_table(
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
elif "threatscore_" in compliance_framework:
|
||||
elif compliance_framework.startswith("prowler_threatscore_"):
|
||||
get_prowler_threatscore_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
@@ -90,7 +206,7 @@ def display_compliance_table(
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
elif "csa_ccm_" in compliance_framework:
|
||||
elif compliance_framework.startswith("csa_ccm_"):
|
||||
get_csa_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
@@ -99,7 +215,7 @@ def display_compliance_table(
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
elif "c5_" in compliance_framework:
|
||||
elif compliance_framework.startswith("c5_"):
|
||||
get_c5_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
@@ -117,6 +233,15 @@ def display_compliance_table(
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
elif "essential_eight" in compliance_framework:
|
||||
get_essential_eight_table(
|
||||
findings,
|
||||
bulk_checks_metadata,
|
||||
compliance_framework,
|
||||
output_filename,
|
||||
output_directory,
|
||||
compliance_overview,
|
||||
)
|
||||
else:
|
||||
get_generic_compliance_table(
|
||||
findings,
|
||||
@@ -131,49 +256,3 @@ def display_compliance_table(
|
||||
f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# TODO: this should be in the Check class
|
||||
def get_check_compliance(
|
||||
finding: Check_Report, provider_type: str, bulk_checks_metadata: dict
|
||||
) -> dict:
|
||||
"""get_check_compliance returns a map with the compliance framework as key and the requirements where the finding's check is present.
|
||||
|
||||
Example:
|
||||
|
||||
{
|
||||
"CIS-1.4": ["2.1.3"],
|
||||
"CIS-1.5": ["2.1.3"],
|
||||
}
|
||||
|
||||
Args:
|
||||
finding (Check_Report): The Check_Report finding
|
||||
provider_type (str): The provider type
|
||||
bulk_checks_metadata (dict): The bulk checks metadata
|
||||
|
||||
Returns:
|
||||
dict: The compliance framework as key and the requirements where the finding's check is present.
|
||||
"""
|
||||
try:
|
||||
check_compliance = {}
|
||||
# We have to retrieve all the check's compliance requirements
|
||||
if finding.check_metadata.CheckID in bulk_checks_metadata:
|
||||
for compliance in bulk_checks_metadata[
|
||||
finding.check_metadata.CheckID
|
||||
].Compliance:
|
||||
compliance_fw = compliance.Framework
|
||||
if compliance.Version:
|
||||
compliance_fw = f"{compliance_fw}-{compliance.Version}"
|
||||
# compliance.Provider == "Azure" or "Kubernetes"
|
||||
# provider_type == "azure" or "kubernetes"
|
||||
if compliance.Provider.upper() == provider_type.upper():
|
||||
if compliance_fw not in check_compliance:
|
||||
check_compliance[compliance_fw] = []
|
||||
for requirement in compliance.Requirements:
|
||||
check_compliance[compliance_fw].append(requirement.Id)
|
||||
return check_compliance
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
|
||||
)
|
||||
return {}
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
from prowler.lib.check.models import Check_Report
|
||||
from prowler.lib.logger import logger
|
||||
|
||||
|
||||
# TODO: this should be in the Check class
|
||||
def get_check_compliance(
|
||||
finding: Check_Report, provider_type: str, bulk_checks_metadata: dict
|
||||
) -> dict:
|
||||
"""get_check_compliance returns a map with the compliance framework as key and the requirements where the finding's check is present.
|
||||
|
||||
Example:
|
||||
|
||||
{
|
||||
"CIS-1.4": ["2.1.3"],
|
||||
"CIS-1.5": ["2.1.3"],
|
||||
}
|
||||
|
||||
Args:
|
||||
finding (Check_Report): The Check_Report finding
|
||||
provider_type (str): The provider type
|
||||
bulk_checks_metadata (dict): The bulk checks metadata
|
||||
|
||||
Returns:
|
||||
dict: The compliance framework as key and the requirements where the finding's check is present.
|
||||
"""
|
||||
try:
|
||||
check_compliance = {}
|
||||
# We have to retrieve all the check's compliance requirements
|
||||
if finding.check_metadata.CheckID in bulk_checks_metadata:
|
||||
for compliance in bulk_checks_metadata[
|
||||
finding.check_metadata.CheckID
|
||||
].Compliance:
|
||||
compliance_fw = compliance.Framework
|
||||
if compliance.Version:
|
||||
compliance_fw = f"{compliance_fw}-{compliance.Version}"
|
||||
# compliance.Provider == "Azure" or "Kubernetes"
|
||||
# provider_type == "azure" or "kubernetes"
|
||||
if compliance.Provider.upper() == provider_type.upper():
|
||||
if compliance_fw not in check_compliance:
|
||||
check_compliance[compliance_fw] = []
|
||||
for requirement in compliance.Requirements:
|
||||
check_compliance[compliance_fw].append(requirement.Id)
|
||||
return check_compliance
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
|
||||
)
|
||||
return {}
|
||||
@@ -0,0 +1,98 @@
|
||||
from colorama import Fore, Style
|
||||
from tabulate import tabulate
|
||||
|
||||
from prowler.config.config import orange_color
|
||||
|
||||
|
||||
def get_essential_eight_table(
|
||||
findings: list,
|
||||
bulk_checks_metadata: dict,
|
||||
compliance_framework: str,
|
||||
output_filename: str,
|
||||
output_directory: str,
|
||||
compliance_overview: bool,
|
||||
):
|
||||
sections = {}
|
||||
essential_eight_compliance_table = {
|
||||
"Provider": [],
|
||||
"Section": [],
|
||||
"Status": [],
|
||||
"Muted": [],
|
||||
}
|
||||
pass_count = []
|
||||
fail_count = []
|
||||
muted_count = []
|
||||
for index, finding in enumerate(findings):
|
||||
check = bulk_checks_metadata[finding.check_metadata.CheckID]
|
||||
check_compliances = check.Compliance
|
||||
for compliance in check_compliances:
|
||||
if compliance.Framework == "Essential-Eight":
|
||||
for requirement in compliance.Requirements:
|
||||
for attribute in requirement.Attributes:
|
||||
section = attribute.Section
|
||||
if section not in sections:
|
||||
sections[section] = {
|
||||
"FAIL": 0,
|
||||
"PASS": 0,
|
||||
"Muted": 0,
|
||||
}
|
||||
if finding.muted:
|
||||
if index not in muted_count:
|
||||
muted_count.append(index)
|
||||
sections[section]["Muted"] += 1
|
||||
else:
|
||||
if finding.status == "FAIL" and index not in fail_count:
|
||||
fail_count.append(index)
|
||||
sections[section]["FAIL"] += 1
|
||||
elif finding.status == "PASS" and index not in pass_count:
|
||||
pass_count.append(index)
|
||||
sections[section]["PASS"] += 1
|
||||
|
||||
sections = dict(sorted(sections.items()))
|
||||
for section in sections:
|
||||
essential_eight_compliance_table["Provider"].append(compliance.Provider)
|
||||
essential_eight_compliance_table["Section"].append(section)
|
||||
if sections[section]["FAIL"] > 0:
|
||||
essential_eight_compliance_table["Status"].append(
|
||||
f"{Fore.RED}FAIL({sections[section]['FAIL']}){Style.RESET_ALL}"
|
||||
)
|
||||
elif sections[section]["PASS"] > 0:
|
||||
essential_eight_compliance_table["Status"].append(
|
||||
f"{Fore.GREEN}PASS({sections[section]['PASS']}){Style.RESET_ALL}"
|
||||
)
|
||||
else:
|
||||
essential_eight_compliance_table["Status"].append("-")
|
||||
essential_eight_compliance_table["Muted"].append(
|
||||
f"{orange_color}{sections[section]['Muted']}{Style.RESET_ALL}"
|
||||
)
|
||||
if len(fail_count) + len(pass_count) + len(muted_count) > 1:
|
||||
print(
|
||||
f"\nCompliance Status of {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Framework:"
|
||||
)
|
||||
total_findings_count = len(fail_count) + len(pass_count) + len(muted_count)
|
||||
overview_table = [
|
||||
[
|
||||
f"{Fore.RED}{round(len(fail_count) / total_findings_count * 100, 2)}% ({len(fail_count)}) FAIL{Style.RESET_ALL}",
|
||||
f"{Fore.GREEN}{round(len(pass_count) / total_findings_count * 100, 2)}% ({len(pass_count)}) PASS{Style.RESET_ALL}",
|
||||
f"{orange_color}{round(len(muted_count) / total_findings_count * 100, 2)}% ({len(muted_count)}) MUTED{Style.RESET_ALL}",
|
||||
]
|
||||
]
|
||||
print(tabulate(overview_table, tablefmt="rounded_grid"))
|
||||
if not compliance_overview:
|
||||
print(
|
||||
f"\nFramework {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Results:"
|
||||
)
|
||||
print(
|
||||
tabulate(
|
||||
essential_eight_compliance_table,
|
||||
headers="keys",
|
||||
tablefmt="rounded_grid",
|
||||
)
|
||||
)
|
||||
print(
|
||||
f"{Style.BRIGHT}* Only sections containing results appear.{Style.RESET_ALL}"
|
||||
)
|
||||
print(f"\nDetailed results of {compliance_framework.upper()} are in:")
|
||||
print(
|
||||
f" - CSV: {output_directory}/compliance/{output_filename}_{compliance_framework}.csv\n"
|
||||
)
|
||||
@@ -0,0 +1,111 @@
|
||||
from prowler.config.config import timestamp
|
||||
from prowler.lib.check.compliance_models import Compliance
|
||||
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
|
||||
from prowler.lib.outputs.compliance.essential_eight.models import (
|
||||
EssentialEightAWSModel,
|
||||
)
|
||||
from prowler.lib.outputs.finding import Finding
|
||||
|
||||
|
||||
class EssentialEightAWS(ComplianceOutput):
|
||||
"""
|
||||
This class represents the AWS ASD Essential Eight compliance output.
|
||||
|
||||
Attributes:
|
||||
- _data (list): A list to store transformed data from findings.
|
||||
- _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.
|
||||
|
||||
Methods:
|
||||
- transform: Transforms findings into AWS Essential Eight compliance format.
|
||||
"""
|
||||
|
||||
def transform(
|
||||
self,
|
||||
findings: list[Finding],
|
||||
compliance: Compliance,
|
||||
compliance_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
Transforms a list of findings into AWS Essential Eight compliance format.
|
||||
|
||||
Parameters:
|
||||
- findings (list): A list of findings.
|
||||
- compliance (Compliance): A compliance model.
|
||||
- compliance_name (str): The name of the compliance model.
|
||||
|
||||
Returns:
|
||||
- None
|
||||
"""
|
||||
for finding in findings:
|
||||
finding_requirements = finding.compliance.get(compliance_name, [])
|
||||
for requirement in compliance.Requirements:
|
||||
if requirement.Id in finding_requirements:
|
||||
for attribute in requirement.Attributes:
|
||||
compliance_row = EssentialEightAWSModel(
|
||||
Provider=finding.provider,
|
||||
Description=compliance.Description,
|
||||
AccountId=finding.account_uid,
|
||||
Region=finding.region,
|
||||
AssessmentDate=str(timestamp),
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_MaturityLevel=attribute.MaturityLevel,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_CloudApplicability=attribute.CloudApplicability,
|
||||
Requirements_Attributes_MitigatedThreats=", ".join(
|
||||
attribute.MitigatedThreats
|
||||
),
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
Requirements_Attributes_RationaleStatement=attribute.RationaleStatement,
|
||||
Requirements_Attributes_ImpactStatement=attribute.ImpactStatement,
|
||||
Requirements_Attributes_RemediationProcedure=attribute.RemediationProcedure,
|
||||
Requirements_Attributes_AuditProcedure=attribute.AuditProcedure,
|
||||
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
|
||||
Requirements_Attributes_References=attribute.References,
|
||||
Status=finding.status,
|
||||
StatusExtended=finding.status_extended,
|
||||
ResourceId=finding.resource_uid,
|
||||
ResourceName=finding.resource_name,
|
||||
CheckId=finding.check_id,
|
||||
Muted=finding.muted,
|
||||
Framework=compliance.Framework,
|
||||
Name=compliance.Name,
|
||||
)
|
||||
self._data.append(compliance_row)
|
||||
# Add manual requirements to the compliance output
|
||||
for requirement in compliance.Requirements:
|
||||
if not requirement.Checks:
|
||||
for attribute in requirement.Attributes:
|
||||
compliance_row = EssentialEightAWSModel(
|
||||
Provider=compliance.Provider.lower(),
|
||||
Description=compliance.Description,
|
||||
AccountId="",
|
||||
Region="",
|
||||
AssessmentDate=str(timestamp),
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_MaturityLevel=attribute.MaturityLevel,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_CloudApplicability=attribute.CloudApplicability,
|
||||
Requirements_Attributes_MitigatedThreats=", ".join(
|
||||
attribute.MitigatedThreats
|
||||
),
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
Requirements_Attributes_RationaleStatement=attribute.RationaleStatement,
|
||||
Requirements_Attributes_ImpactStatement=attribute.ImpactStatement,
|
||||
Requirements_Attributes_RemediationProcedure=attribute.RemediationProcedure,
|
||||
Requirements_Attributes_AuditProcedure=attribute.AuditProcedure,
|
||||
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
|
||||
Requirements_Attributes_References=attribute.References,
|
||||
Status="MANUAL",
|
||||
StatusExtended="Manual check",
|
||||
ResourceId="manual_check",
|
||||
ResourceName="Manual check",
|
||||
CheckId="manual",
|
||||
Muted=False,
|
||||
Framework=compliance.Framework,
|
||||
Name=compliance.Name,
|
||||
)
|
||||
self._data.append(compliance_row)
|
||||
@@ -0,0 +1,35 @@
|
||||
from pydantic.v1 import BaseModel
|
||||
|
||||
|
||||
class EssentialEightAWSModel(BaseModel):
|
||||
"""
|
||||
EssentialEightAWSModel generates a finding's output in AWS ASD Essential Eight Compliance format.
|
||||
"""
|
||||
|
||||
Provider: str
|
||||
Description: str
|
||||
AccountId: str
|
||||
Region: str
|
||||
AssessmentDate: str
|
||||
Requirements_Id: str
|
||||
Requirements_Description: str
|
||||
Requirements_Attributes_Section: str
|
||||
Requirements_Attributes_MaturityLevel: str
|
||||
Requirements_Attributes_AssessmentStatus: str
|
||||
Requirements_Attributes_CloudApplicability: str
|
||||
Requirements_Attributes_MitigatedThreats: str
|
||||
Requirements_Attributes_Description: str
|
||||
Requirements_Attributes_RationaleStatement: str
|
||||
Requirements_Attributes_ImpactStatement: str
|
||||
Requirements_Attributes_RemediationProcedure: str
|
||||
Requirements_Attributes_AuditProcedure: str
|
||||
Requirements_Attributes_AdditionalInformation: str
|
||||
Requirements_Attributes_References: str
|
||||
Status: str
|
||||
StatusExtended: str
|
||||
ResourceId: str
|
||||
ResourceName: str
|
||||
CheckId: str
|
||||
Muted: bool
|
||||
Framework: str
|
||||
Name: str
|
||||
@@ -1,6 +1,7 @@
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import List
|
||||
from typing import TYPE_CHECKING, List
|
||||
|
||||
from py_ocsf_models.events.base_event import SeverityID
|
||||
from py_ocsf_models.events.base_event import StatusID as EventStatusID
|
||||
@@ -20,11 +21,12 @@ from py_ocsf_models.objects.resource_details import ResourceDetails
|
||||
from prowler.config.config import prowler_version
|
||||
from prowler.lib.check.compliance_models import ComplianceFramework
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.lib.outputs.finding import Finding
|
||||
from prowler.lib.outputs.ocsf.ocsf import OCSF
|
||||
from prowler.lib.outputs.utils import unroll_dict_to_list
|
||||
from prowler.lib.utils.utils import open_file
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from prowler.lib.outputs.finding import Finding
|
||||
|
||||
PROWLER_TO_COMPLIANCE_STATUS = {
|
||||
"PASS": ComplianceStatusID.Pass,
|
||||
"FAIL": ComplianceStatusID.Fail,
|
||||
@@ -32,6 +34,40 @@ PROWLER_TO_COMPLIANCE_STATUS = {
|
||||
}
|
||||
|
||||
|
||||
def _sanitize_resource_data(resource_details, resource_metadata) -> dict:
|
||||
"""Ensure resource data is JSON-serializable.
|
||||
|
||||
Service resource_metadata may carry non-serializable objects (e.g. raw
|
||||
Pydantic models or service classes such as ``Trail`` / ``LifecyclePolicy``).
|
||||
Convert them to plain dicts and roundtrip through JSON so the resulting
|
||||
ComplianceFinding can be serialized without errors.
|
||||
"""
|
||||
|
||||
def _make_serializable(obj):
|
||||
if hasattr(obj, "model_dump") and callable(obj.model_dump):
|
||||
return _make_serializable(obj.model_dump())
|
||||
if hasattr(obj, "dict") and callable(obj.dict):
|
||||
return _make_serializable(obj.dict())
|
||||
if isinstance(obj, dict):
|
||||
return {str(k): _make_serializable(v) for k, v in obj.items()}
|
||||
if isinstance(obj, (list, tuple)):
|
||||
return [_make_serializable(v) for v in obj]
|
||||
return obj
|
||||
|
||||
try:
|
||||
converted = _make_serializable(resource_metadata)
|
||||
sanitized_metadata = json.loads(json.dumps(converted, default=str))
|
||||
except (TypeError, ValueError, RecursionError) as error:
|
||||
logger.warning(
|
||||
f"Failed to serialize resource metadata, defaulting to empty: {error}"
|
||||
)
|
||||
sanitized_metadata = {}
|
||||
return {
|
||||
"details": resource_details,
|
||||
"metadata": sanitized_metadata,
|
||||
}
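As an illustration of the sanitization described above (the `Trail` model here is a stand-in, not the real service class), nested Pydantic objects end up as plain JSON-safe dicts:

```python
from datetime import datetime, timezone
from pydantic import BaseModel


class Trail(BaseModel):  # stand-in for a service resource model
    name: str
    created: datetime


data = _sanitize_resource_data(
    resource_details="CloudTrail trail",
    resource_metadata={"trail": Trail(name="main", created=datetime.now(timezone.utc))},
)
# data["metadata"]["trail"] is a plain dict and "created" has been coerced to a
# string, so the surrounding ComplianceFinding serializes without errors.
```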
|
||||
|
||||
|
||||
def _to_snake_case(name: str) -> str:
|
||||
"""Convert a PascalCase or camelCase string to snake_case."""
|
||||
import re
|
||||
@@ -108,7 +144,7 @@ class OCSFComplianceOutput:
|
||||
|
||||
def _transform(
|
||||
self,
|
||||
findings: List[Finding],
|
||||
findings: List["Finding"],
|
||||
framework: ComplianceFramework,
|
||||
compliance_name: str,
|
||||
) -> None:
|
||||
@@ -177,7 +213,7 @@ class OCSFComplianceOutput:
|
||||
|
||||
def _build_compliance_finding(
|
||||
self,
|
||||
finding: Finding,
|
||||
finding: "Finding",
|
||||
framework: ComplianceFramework,
|
||||
requirement,
|
||||
compliance_name: str,
|
||||
@@ -195,7 +231,9 @@ class OCSFComplianceOutput:
|
||||
finding.metadata.Severity.capitalize(),
|
||||
SeverityID.Unknown,
|
||||
)
|
||||
event_status = OCSF.get_finding_status_id(finding.muted)
|
||||
event_status = (
|
||||
EventStatusID.Suppressed if finding.muted else EventStatusID.New
|
||||
)
|
||||
|
||||
time_value = (
|
||||
int(finding.timestamp.timestamp())
|
||||
@@ -268,10 +306,10 @@ class OCSFComplianceOutput:
|
||||
if finding.provider == "kubernetes"
|
||||
else None
|
||||
),
|
||||
data={
|
||||
"details": finding.resource_details,
|
||||
"metadata": finding.resource_metadata,
|
||||
},
|
||||
data=_sanitize_resource_data(
|
||||
finding.resource_details,
|
||||
finding.resource_metadata,
|
||||
),
|
||||
)
|
||||
],
|
||||
severity_id=finding_severity.value,
|
||||
|
||||
@@ -0,0 +1,294 @@
|
||||
from csv import DictWriter
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from pydantic.v1 import create_model
|
||||
|
||||
from prowler.config.config import timestamp
|
||||
from prowler.lib.check.compliance_models import ComplianceFramework
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.lib.utils.utils import open_file
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from prowler.lib.outputs.finding import Finding
|
||||
|
||||
PROVIDER_HEADER_MAP = {
|
||||
"aws": ("AccountId", "account_uid", "Region", "region"),
|
||||
"azure": ("SubscriptionId", "account_uid", "Location", "region"),
|
||||
"gcp": ("ProjectId", "account_uid", "Location", "region"),
|
||||
"kubernetes": ("Context", "account_name", "Namespace", "region"),
|
||||
"m365": ("TenantId", "account_uid", "Location", "region"),
|
||||
"github": ("Account_Name", "account_name", "Account_Id", "account_uid"),
|
||||
"oraclecloud": ("TenancyId", "account_uid", "Region", "region"),
|
||||
"alibabacloud": ("AccountId", "account_uid", "Region", "region"),
|
||||
"nhn": ("AccountId", "account_uid", "Region", "region"),
|
||||
}
|
||||
_DEFAULT_HEADERS = ("AccountId", "account_uid", "Region", "region")
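For example (the provider value shown is hypothetical), the header names and the Finding fields they read from are resolved per provider, with the AWS-style tuple as the fallback:

```python
acct_header, acct_field, loc_header, loc_field = PROVIDER_HEADER_MAP.get(
    "azure", _DEFAULT_HEADERS
)
# acct_header == "SubscriptionId", acct_field == "account_uid"
# loc_header  == "Location",       loc_field  == "region"
```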
|
||||
|
||||
|
||||
class UniversalComplianceOutput:
|
||||
"""Universal compliance CSV output driven by ComplianceFramework metadata.
|
||||
|
||||
Dynamically builds a Pydantic row model from attributes_metadata so that
|
||||
CSV columns match the framework's declared attribute fields.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
findings: list,
|
||||
framework: ComplianceFramework,
|
||||
file_path: str = None,
|
||||
from_cli: bool = True,
|
||||
provider: str = None,
|
||||
) -> None:
|
||||
self._data = []
|
||||
self._file_descriptor = None
|
||||
self.file_path = file_path
|
||||
self._from_cli = from_cli
|
||||
self._provider = provider
|
||||
self.close_file = False
|
||||
|
||||
if file_path:
|
||||
path_obj = Path(file_path)
|
||||
self._file_extension = path_obj.suffix if path_obj.suffix else ""
|
||||
|
||||
if findings:
|
||||
self._row_model = self._build_row_model(framework)
|
||||
compliance_name = (
|
||||
framework.framework + "-" + framework.version
|
||||
if framework.version
|
||||
else framework.framework
|
||||
)
|
||||
self._transform(findings, framework, compliance_name)
|
||||
if not self._file_descriptor and file_path:
|
||||
self._create_file_descriptor(file_path)
|
||||
|
||||
@property
|
||||
def data(self):
|
||||
return self._data
|
||||
|
||||
def _build_row_model(self, framework: ComplianceFramework):
|
||||
"""Build a dynamic Pydantic model from attributes_metadata."""
|
||||
acct_header, acct_field, loc_header, loc_field = PROVIDER_HEADER_MAP.get(
|
||||
(self._provider or "").lower(), _DEFAULT_HEADERS
|
||||
)
|
||||
self._acct_header = acct_header
|
||||
self._acct_field = acct_field
|
||||
self._loc_header = loc_header
|
||||
self._loc_field = loc_field
|
||||
|
||||
# Base fields present in every compliance CSV
|
||||
fields = {
|
||||
"Provider": (str, ...),
|
||||
"Description": (str, ...),
|
||||
acct_header: (str, ...),
|
||||
loc_header: (str, ...),
|
||||
"AssessmentDate": (str, ...),
|
||||
"Requirements_Id": (str, ...),
|
||||
"Requirements_Description": (str, ...),
|
||||
}
|
||||
|
||||
# Dynamic attribute columns from metadata
|
||||
if framework.attributes_metadata:
|
||||
for attr_meta in framework.attributes_metadata:
|
||||
if not attr_meta.output_formats.csv:
|
||||
continue
|
||||
field_name = f"Requirements_Attributes_{attr_meta.key}"
|
||||
# Map type strings to Python types
|
||||
type_map = {
|
||||
"str": Optional[str],
|
||||
"int": Optional[int],
|
||||
"float": Optional[float],
|
||||
"bool": Optional[bool],
|
||||
"list_str": Optional[str], # Serialized as joined string
|
||||
"list_dict": Optional[str], # Serialized as string
|
||||
}
|
||||
py_type = type_map.get(attr_meta.type, Optional[str])
|
||||
fields[field_name] = (py_type, None)
|
||||
|
||||
# Check if any requirement has MITRE fields
|
||||
has_mitre = any(req.tactics for req in framework.requirements if req.tactics)
|
||||
if has_mitre:
|
||||
fields["Requirements_Tactics"] = (Optional[str], None)
|
||||
fields["Requirements_SubTechniques"] = (Optional[str], None)
|
||||
fields["Requirements_Platforms"] = (Optional[str], None)
|
||||
fields["Requirements_TechniqueURL"] = (Optional[str], None)
|
||||
|
||||
# Trailing fields
|
||||
fields["Status"] = (str, ...)
|
||||
fields["StatusExtended"] = (str, ...)
|
||||
fields["ResourceId"] = (str, ...)
|
||||
fields["ResourceName"] = (str, ...)
|
||||
fields["CheckId"] = (str, ...)
|
||||
fields["Muted"] = (bool, ...)
|
||||
fields["Framework"] = (str, ...)
|
||||
fields["Name"] = (str, ...)
|
||||
|
||||
return create_model("UniversalComplianceRow", **fields)
|
||||
|
||||
def _serialize_attr_value(self, value):
|
||||
"""Serialize attribute values for CSV."""
|
||||
if isinstance(value, list):
|
||||
if value and isinstance(value[0], dict):
|
||||
return str(value)
|
||||
return " | ".join(str(v) for v in value)
|
||||
return value
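A quick illustration of the serialization rules above; `output` is a hypothetical instance, since the method does not depend on any state:

```python
output._serialize_attr_value(["ML1", "ML2", "ML3"])   # -> "ML1 | ML2 | ML3"
output._serialize_attr_value([{"tactic": "TA0001"}])   # -> "[{'tactic': 'TA0001'}]"
output._serialize_attr_value("already a string")       # -> "already a string"
```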
|
||||
|
||||
def _build_row(self, finding, framework, requirement, is_manual=False):
|
||||
"""Build a single row dict for a finding + requirement combination."""
|
||||
row = {
|
||||
"Provider": (
|
||||
finding.provider
|
||||
if not is_manual
|
||||
else (framework.provider or self._provider or "").lower()
|
||||
),
|
||||
"Description": framework.description,
|
||||
self._acct_header: (
|
||||
getattr(finding, self._acct_field, "") if not is_manual else ""
|
||||
),
|
||||
self._loc_header: (
|
||||
getattr(finding, self._loc_field, "") if not is_manual else ""
|
||||
),
|
||||
"AssessmentDate": str(timestamp),
|
||||
"Requirements_Id": requirement.id,
|
||||
"Requirements_Description": requirement.description,
|
||||
}
|
||||
|
||||
# Add dynamic attribute columns
|
||||
if framework.attributes_metadata:
|
||||
for attr_meta in framework.attributes_metadata:
|
||||
if not attr_meta.output_formats.csv:
|
||||
continue
|
||||
field_name = f"Requirements_Attributes_{attr_meta.key}"
|
||||
raw_val = requirement.attributes.get(attr_meta.key)
|
||||
row[field_name] = (
|
||||
self._serialize_attr_value(raw_val) if raw_val is not None else None
|
||||
)
|
||||
|
||||
# MITRE fields
|
||||
if requirement.tactics:
|
||||
row["Requirements_Tactics"] = (
|
||||
" | ".join(requirement.tactics) if requirement.tactics else None
|
||||
)
|
||||
row["Requirements_SubTechniques"] = (
|
||||
" | ".join(requirement.sub_techniques)
|
||||
if requirement.sub_techniques
|
||||
else None
|
||||
)
|
||||
row["Requirements_Platforms"] = (
|
||||
" | ".join(requirement.platforms) if requirement.platforms else None
|
||||
)
|
||||
row["Requirements_TechniqueURL"] = requirement.technique_url
|
||||
|
||||
row["Status"] = finding.status if not is_manual else "MANUAL"
|
||||
row["StatusExtended"] = (
|
||||
finding.status_extended if not is_manual else "Manual check"
|
||||
)
|
||||
row["ResourceId"] = finding.resource_uid if not is_manual else "manual_check"
|
||||
row["ResourceName"] = finding.resource_name if not is_manual else "Manual check"
|
||||
row["CheckId"] = finding.check_id if not is_manual else "manual"
|
||||
row["Muted"] = finding.muted if not is_manual else False
|
||||
row["Framework"] = framework.framework
|
||||
row["Name"] = framework.name
|
||||
|
||||
return row
|
||||
|
||||
def _transform(
|
||||
self,
|
||||
findings: list["Finding"],
|
||||
framework: ComplianceFramework,
|
||||
compliance_name: str,
|
||||
) -> None:
|
||||
"""Transform findings into universal compliance CSV rows."""
|
||||
# Build check -> requirements map (filtered by provider for dict checks)
|
||||
check_req_map = {}
|
||||
for req in framework.requirements:
|
||||
checks = req.checks
|
||||
if self._provider:
|
||||
all_checks = checks.get(self._provider.lower(), [])
|
||||
else:
|
||||
all_checks = []
|
||||
for check_list in checks.values():
|
||||
all_checks.extend(check_list)
|
||||
for check_id in all_checks:
|
||||
if check_id not in check_req_map:
|
||||
check_req_map[check_id] = []
|
||||
check_req_map[check_id].append(req)
|
||||
|
||||
# Process findings using the provider-filtered check_req_map.
|
||||
# This ensures that for multi-provider dict checks, only the checks
|
||||
# belonging to the current provider produce output rows.
|
||||
for finding in findings:
|
||||
check_id = finding.check_id
|
||||
if check_id in check_req_map:
|
||||
for req in check_req_map[check_id]:
|
||||
row = self._build_row(finding, framework, req)
|
||||
try:
|
||||
self._data.append(self._row_model(**row))
|
||||
except Exception as e:
|
||||
logger.debug(f"Skipping row for {req.id}: {e}")
|
||||
|
||||
# Manual requirements (no checks or empty dict)
|
||||
for req in framework.requirements:
|
||||
checks = req.checks
|
||||
if self._provider:
|
||||
has_checks = bool(checks.get(self._provider.lower(), []))
|
||||
else:
|
||||
has_checks = any(checks.values())
|
||||
|
||||
if not has_checks:
|
||||
# Use a dummy finding-like namespace for manual rows
|
||||
row = self._build_row(
|
||||
_ManualFindingStub(), framework, req, is_manual=True
|
||||
)
|
||||
try:
|
||||
self._data.append(self._row_model(**row))
|
||||
except Exception as e:
|
||||
logger.debug(f"Skipping manual row for {req.id}: {e}")
|
||||
|
||||
def _create_file_descriptor(self, file_path: str) -> None:
|
||||
try:
|
||||
self._file_descriptor = open_file(file_path, "a")
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
)
|
||||
|
||||
def batch_write_data_to_file(self) -> None:
|
||||
"""Write findings data to CSV."""
|
||||
try:
|
||||
if (
|
||||
getattr(self, "_file_descriptor", None)
|
||||
and not self._file_descriptor.closed
|
||||
and self._data
|
||||
):
|
||||
csv_writer = DictWriter(
|
||||
self._file_descriptor,
|
||||
fieldnames=[field.upper() for field in self._data[0].dict().keys()],
|
||||
delimiter=";",
|
||||
)
|
||||
if self._file_descriptor.tell() == 0:
|
||||
csv_writer.writeheader()
|
||||
for row in self._data:
|
||||
csv_writer.writerow({k.upper(): v for k, v in row.dict().items()})
|
||||
if self.close_file or self._from_cli:
|
||||
self._file_descriptor.close()
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
)
|
||||
|
||||
|
||||
class _ManualFindingStub:
|
||||
"""Minimal stub to satisfy _build_row for manual requirements."""
|
||||
|
||||
provider = ""
|
||||
account_uid = ""
|
||||
account_name = ""
|
||||
region = ""
|
||||
status = "MANUAL"
|
||||
status_extended = "Manual check"
|
||||
resource_uid = "manual_check"
|
||||
resource_name = "Manual check"
|
||||
check_id = "manual"
|
||||
muted = False
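Sketch of typical use, with placeholder framework and findings objects: one writer per framework per run, appended to and flushed once per batch:

```python
output = UniversalComplianceOutput(
    findings=finding_batch,    # list of Finding objects (hypothetical)
    framework=cis_framework,   # a ComplianceFramework with table_config (hypothetical)
    file_path="/tmp/output/compliance/prowler-output_cis_5.0_aws.csv",
    provider="aws",
)
output.batch_write_data_to_file()  # writes the uppercase header row once, then the data rows
```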
|
||||
@@ -15,7 +15,7 @@ from prowler.lib.check.models import (
|
||||
)
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.lib.outputs.common import Status, fill_common_finding_data
|
||||
from prowler.lib.outputs.compliance.compliance import get_check_compliance
|
||||
from prowler.lib.outputs.compliance.compliance_check import get_check_compliance
|
||||
from prowler.lib.outputs.utils import unroll_tags
|
||||
from prowler.lib.utils.utils import dict_to_lowercase, get_nested_attribute
|
||||
from prowler.providers.common.provider import Provider
|
||||
@@ -245,15 +245,16 @@ class Finding(BaseModel):
|
||||
elif provider.type == "kubernetes":
|
||||
if provider.identity.context == "In-Cluster":
|
||||
output_data["auth_method"] = "in-cluster"
|
||||
output_data["provider_uid"] = provider.identity.cluster
|
||||
else:
|
||||
output_data["auth_method"] = "kubeconfig"
|
||||
output_data["provider_uid"] = provider.identity.context
|
||||
output_data["resource_name"] = check_output.resource_name
|
||||
output_data["resource_uid"] = check_output.resource_id
|
||||
output_data["account_name"] = f"context: {provider.identity.context}"
|
||||
output_data["account_uid"] = get_nested_attribute(
|
||||
provider, "identity.cluster"
|
||||
)
|
||||
output_data["provider_uid"] = provider.identity.context
|
||||
output_data["region"] = f"namespace: {check_output.namespace}"
|
||||
|
||||
elif provider.type == "github":
|
||||
|
||||
@@ -25,8 +25,8 @@ from prowler.lib.utils.utils import open_file, parse_json_file, print_boxes
|
||||
from prowler.providers.aws.config import (
|
||||
AWS_REGION_US_EAST_1,
|
||||
AWS_STS_GLOBAL_ENDPOINT_REGION,
|
||||
BOTO3_USER_AGENT_EXTRA,
|
||||
ROLE_SESSION_NAME,
|
||||
get_default_session_config,
|
||||
)
|
||||
from prowler.providers.aws.exceptions.exceptions import (
|
||||
AWSAccessKeyIDInvalidError,
|
||||
@@ -227,14 +227,15 @@ class AwsProvider(Provider):
|
||||
|
||||
# TODO: Use AwsSetUpSession ?????
|
||||
# Configure the initial AWS Session using the local credentials: profile or environment variables
|
||||
session_config = self.set_session_config(retries_max_attempts)
|
||||
aws_session = self.setup_session(
|
||||
mfa=mfa,
|
||||
profile=profile,
|
||||
aws_access_key_id=aws_access_key_id,
|
||||
aws_secret_access_key=aws_secret_access_key,
|
||||
aws_session_token=aws_session_token,
|
||||
session_config=session_config,
|
||||
)
|
||||
session_config = self.set_session_config(retries_max_attempts)
|
||||
# The current session and the original session point to the same session object until we get a new one, if needed
|
||||
self._session = AWSSession(
|
||||
current_session=aws_session,
|
||||
@@ -630,6 +631,7 @@ class AwsProvider(Provider):
|
||||
aws_access_key_id: str = None,
|
||||
aws_secret_access_key: str = None,
|
||||
aws_session_token: Optional[str] = None,
|
||||
session_config: Optional[Config] = None,
|
||||
) -> Session:
|
||||
"""
|
||||
setup_session sets up an AWS session using the provided credentials.
|
||||
@@ -640,6 +642,9 @@ class AwsProvider(Provider):
|
||||
- aws_access_key_id: The AWS access key ID.
|
||||
- aws_secret_access_key: The AWS secret access key.
|
||||
- aws_session_token: The AWS session token, optional.
|
||||
- session_config: Botocore Config applied as the session's default
|
||||
client config so every client created from the session inherits
|
||||
the Prowler user agent and retry settings.
|
||||
|
||||
Returns:
|
||||
- Session: The AWS session.
|
||||
@@ -650,6 +655,9 @@ class AwsProvider(Provider):
|
||||
try:
|
||||
logger.debug("Creating original session ...")
|
||||
|
||||
if session_config is None:
|
||||
session_config = AwsProvider.set_session_config(None)
|
||||
|
||||
session_arguments = {}
|
||||
if profile:
|
||||
session_arguments["profile_name"] = profile
|
||||
@@ -661,6 +669,7 @@ class AwsProvider(Provider):
|
||||
|
||||
if mfa:
|
||||
session = Session(**session_arguments)
|
||||
session._session.set_default_client_config(session_config)
|
||||
sts_client = session.client("sts")
|
||||
|
||||
# TODO: pass values from the input
|
||||
@@ -673,7 +682,7 @@ class AwsProvider(Provider):
|
||||
session_credentials = sts_client.get_session_token(
|
||||
**get_session_token_arguments
|
||||
)
|
||||
return Session(
|
||||
mfa_session = Session(
|
||||
aws_access_key_id=session_credentials["Credentials"]["AccessKeyId"],
|
||||
aws_secret_access_key=session_credentials["Credentials"][
|
||||
"SecretAccessKey"
|
||||
@@ -682,8 +691,12 @@ class AwsProvider(Provider):
|
||||
"SessionToken"
|
||||
],
|
||||
)
|
||||
mfa_session._session.set_default_client_config(session_config)
|
||||
return mfa_session
|
||||
else:
|
||||
return Session(**session_arguments)
|
||||
session = Session(**session_arguments)
|
||||
session._session.set_default_client_config(session_config)
|
||||
return session
|
||||
except Exception as error:
|
||||
logger.critical(
|
||||
f"AWSSetUpSessionError[{error.__traceback__.tb_lineno}]: {error}"
|
||||
@@ -698,6 +711,7 @@ class AwsProvider(Provider):
|
||||
identity: AWSIdentityInfo,
|
||||
assumed_role_configuration: AWSAssumeRoleConfiguration,
|
||||
session: AWSSession,
|
||||
session_config: Optional[Config] = None,
|
||||
) -> Session:
|
||||
"""
|
||||
Sets up an assumed session using the provided assumed role credentials.
|
||||
@@ -742,6 +756,13 @@ class AwsProvider(Provider):
|
||||
assumed_session = BotocoreSession()
|
||||
assumed_session._credentials = assumed_refreshable_credentials
|
||||
assumed_session.set_config_variable("region", identity.profile_region)
|
||||
if session_config is None:
|
||||
session_config = (
|
||||
session.session_config
|
||||
if session is not None
|
||||
else AwsProvider.set_session_config(None)
|
||||
)
|
||||
assumed_session.set_default_client_config(session_config)
|
||||
return Session(
|
||||
profile_name=identity.profile,
|
||||
botocore_session=assumed_session,
|
||||
@@ -870,7 +891,7 @@ class AwsProvider(Provider):
|
||||
|
||||
for region in enabled_regions:
|
||||
regional_client = self._session.current_session.client(
|
||||
service, region_name=region, config=self._session.session_config
|
||||
service, region_name=region
|
||||
)
|
||||
regional_client.region = region
|
||||
regional_clients[region] = regional_client
|
||||
@@ -1140,21 +1161,16 @@ class AwsProvider(Provider):
|
||||
Returns:
|
||||
- Config: The botocore Config object
|
||||
"""
|
||||
# Set the maximum retries for the standard retrier config
|
||||
default_session_config = Config(
|
||||
retries={"max_attempts": 3, "mode": "standard"},
|
||||
user_agent_extra=BOTO3_USER_AGENT_EXTRA,
|
||||
)
|
||||
default_session_config = get_default_session_config()
|
||||
if retries_max_attempts:
|
||||
# Create the new config
|
||||
config = Config(
|
||||
retries={
|
||||
"max_attempts": retries_max_attempts,
|
||||
"mode": "standard",
|
||||
},
|
||||
default_session_config = default_session_config.merge(
|
||||
Config(
|
||||
retries={
|
||||
"max_attempts": retries_max_attempts,
|
||||
"mode": "standard",
|
||||
},
|
||||
)
|
||||
)
|
||||
# Merge the new configuration
|
||||
default_session_config = default_session_config.merge(config)
|
||||
|
||||
return default_session_config
|
||||
|
||||
@@ -1425,6 +1441,9 @@ class AwsProvider(Provider):
|
||||
region_name=aws_region,
|
||||
profile_name=profile,
|
||||
)
|
||||
session._session.set_default_client_config(
|
||||
AwsProvider.set_session_config(None)
|
||||
)
|
||||
|
||||
caller_identity = AwsProvider.validate_credentials(session, aws_region)
|
||||
# Do an extra validation if the AWS account ID is provided
|
||||
|
||||
@@ -1,6 +1,15 @@
|
||||
import os
|
||||
|
||||
from botocore.config import Config
|
||||
|
||||
AWS_STS_GLOBAL_ENDPOINT_REGION = "us-east-1"
|
||||
AWS_REGION_US_EAST_1 = "us-east-1"
|
||||
BOTO3_USER_AGENT_EXTRA = os.getenv("PROWLER_AWS_BOTO3_USER_AGENT_EXTRA", "APN_1826889")
|
||||
ROLE_SESSION_NAME = "ProwlerAssessmentSession"
|
||||
|
||||
|
||||
def get_default_session_config() -> Config:
|
||||
return Config(
|
||||
user_agent_extra=BOTO3_USER_AGENT_EXTRA,
|
||||
retries={"max_attempts": 3, "mode": "standard"},
|
||||
)
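Any boto3 client built from this config inherits the Prowler user agent suffix and standard-mode retries; a minimal sketch (the client shown is illustrative):

```python
import boto3

config = get_default_session_config()
s3 = boto3.client("s3", config=config)  # carries user_agent_extra and 3 standard-mode retries
```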
|
||||
|
||||
@@ -56,9 +56,7 @@ def quick_inventory(provider: AwsProvider, args):
|
||||
try:
|
||||
# Scan IAM only once
|
||||
if not iam_was_scanned:
|
||||
global_resources.extend(
|
||||
get_iam_resources(provider.session.current_session)
|
||||
)
|
||||
global_resources.extend(get_iam_resources(provider))
|
||||
iam_was_scanned = True
|
||||
|
||||
# Get regional S3 buckets since untagged buckets are not supported by the resourcegroupstaggingapi
|
||||
@@ -312,8 +310,8 @@ def create_output(resources: list, provider: AwsProvider, args):
|
||||
if args.output_bucket:
|
||||
output_bucket = args.output_bucket
|
||||
bucket_session = provider.session.current_session
|
||||
# Check if -D was input
|
||||
elif args.output_bucket_no_assume:
|
||||
# The outer condition guarantees -D was input when -B was not
|
||||
else:
|
||||
output_bucket = args.output_bucket_no_assume
|
||||
bucket_session = provider.session.original_session
|
||||
|
||||
@@ -375,9 +373,9 @@ def get_regional_buckets(provider: AwsProvider, region: str) -> list:
|
||||
return regional_buckets
|
||||
|
||||
|
||||
def get_iam_resources(session) -> list:
|
||||
def get_iam_resources(provider: AwsProvider) -> list:
|
||||
iam_resources = []
|
||||
iam_client = session.client("iam")
|
||||
iam_client = provider.session.current_session.client("iam")
|
||||
try:
|
||||
get_roles_paginator = iam_client.get_paginator("list_roles")
|
||||
for page in get_roles_paginator.paginate():
|
||||
|
||||
@@ -111,6 +111,13 @@ class S3:
|
||||
- None
|
||||
"""
|
||||
if session:
|
||||
# Preserve the caller's existing default config (and the
|
||||
# retries_max_attempts already baked into it) instead of clobbering
|
||||
# it with a freshly built one.
|
||||
if session._session.get_default_client_config() is None:
|
||||
session._session.set_default_client_config(
|
||||
AwsProvider.set_session_config(retries_max_attempts)
|
||||
)
|
||||
self._session = session.client(__class__.__name__.lower())
|
||||
else:
|
||||
aws_setup_session = AwsSetUpSession(
|
||||
@@ -127,8 +134,7 @@ class S3:
|
||||
regions=regions,
|
||||
)
|
||||
self._session = aws_setup_session._session.current_session.client(
|
||||
__class__.__name__.lower(),
|
||||
config=aws_setup_session._session.session_config,
|
||||
__class__.__name__.lower()
|
||||
)
|
||||
|
||||
self._bucket_name = bucket_name
|
||||
@@ -313,6 +319,9 @@ class S3:
|
||||
region_name=aws_region,
|
||||
profile_name=profile,
|
||||
)
|
||||
session._session.set_default_client_config(
|
||||
AwsProvider.set_session_config(None)
|
||||
)
|
||||
s3_client = session.client(__class__.__name__.lower())
|
||||
if "s3://" in bucket_name:
|
||||
bucket_name = bucket_name.removeprefix("s3://")
|
||||
|
||||
@@ -148,6 +148,13 @@ class SecurityHub:
|
||||
regions=regions,
|
||||
)
|
||||
self._session = aws_setup_session._session.current_session
|
||||
# Only install the Prowler default config when the caller-supplied
|
||||
# session does not already carry one — overwriting would drop the
|
||||
# provider's retries_max_attempts value.
|
||||
if aws_session and self._session._session.get_default_client_config() is None:
|
||||
self._session._session.set_default_client_config(
|
||||
AwsProvider.set_session_config(retries_max_attempts)
|
||||
)
|
||||
self._aws_account_id = aws_account_id
|
||||
if not aws_partition:
|
||||
aws_partition = AwsProvider.validate_credentials(
|
||||
@@ -235,7 +242,7 @@ class SecurityHub:
|
||||
|
||||
Args:
|
||||
region (str): AWS region to check.
|
||||
session (Session): AWS session object.
|
||||
session (Session): AWS session object. Expected to carry the Prowler default client config.
|
||||
aws_account_id (str): AWS account ID.
|
||||
aws_partition (str): AWS partition.
|
||||
|
||||
@@ -540,6 +547,9 @@ class SecurityHub:
|
||||
region_name=aws_region,
|
||||
profile_name=profile,
|
||||
)
|
||||
session._session.set_default_client_config(
|
||||
AwsProvider.set_session_config(None)
|
||||
)
|
||||
|
||||
all_regions = AwsProvider.get_available_aws_service_regions(
|
||||
service="securityhub", partition=aws_partition
|
||||
|
||||
@@ -32,7 +32,13 @@ class AWSService:
|
||||
def is_failed_check(cls, check_id, arn):
|
||||
return (check_id.split(".")[-1], arn) in cls.failed_checks
|
||||
|
||||
def __init__(self, service: str, provider: AwsProvider, global_service=False):
|
||||
def __init__(
|
||||
self,
|
||||
service: str,
|
||||
provider: AwsProvider,
|
||||
global_service=False,
|
||||
region: str = None,
|
||||
):
|
||||
# Audit Information
|
||||
# Do we need to store the whole provider?
|
||||
self.provider = provider
|
||||
@@ -61,7 +67,7 @@ class AWSService:
|
||||
# Get a single region and client if the service needs it (e.g. AWS Global Service)
|
||||
# We cannot include this within an else because some services need both the regional_clients
|
||||
# and a single client like S3
|
||||
self.region = provider.get_default_region(
|
||||
self.region = region or provider.get_default_region(
|
||||
self.service, global_service=global_service
|
||||
)
|
||||
self.client = self.session.client(self.service, self.region)
|
||||
|
||||
@@ -73,15 +73,15 @@ class AwsSetUpSession:
|
||||
aws_access_key_id=aws_access_key_id,
|
||||
aws_secret_access_key=aws_secret_access_key,
|
||||
)
|
||||
# Setup the AWS session
|
||||
session_config = AwsProvider.set_session_config(retries_max_attempts)
|
||||
aws_session = AwsProvider.setup_session(
|
||||
mfa=mfa,
|
||||
profile=profile,
|
||||
aws_access_key_id=aws_access_key_id,
|
||||
aws_secret_access_key=aws_secret_access_key,
|
||||
aws_session_token=aws_session_token,
|
||||
session_config=session_config,
|
||||
)
|
||||
session_config = AwsProvider.set_session_config(retries_max_attempts)
|
||||
self._session = AWSSession(
|
||||
current_session=aws_session,
|
||||
session_config=session_config,
|
||||
|
||||
@@ -0,0 +1,44 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "bedrock_guardrails_configured",
|
||||
"CheckTitle": "Bedrock has at least one guardrail configured in the audited region",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis"
|
||||
],
|
||||
"ServiceName": "bedrock",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "Other",
|
||||
"ResourceGroup": "ai_ml",
|
||||
"Description": "**Amazon Bedrock guardrails** provide reusable safety policies for filtering harmful or unwanted content in model inputs and outputs.\n\nThis evaluation checks whether at least one guardrail exists in each successfully scanned region. It does **not** verify that guardrails are attached to agents or passed on individual model invocation API calls.",
|
||||
"Risk": "Without any configured **Bedrock guardrails** in a region, teams lack a native reusable policy object for **content filtering** and **safety controls**. Applications may invoke models without standardized protections against **harmful content**, **prompt injection**, or **sensitive-data exposure** unless equivalent controls are enforced elsewhere.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html",
|
||||
"https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-create.html"
|
||||
],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws bedrock create-guardrail --name example_resource --blocked-input-messaging 'Blocked' --blocked-outputs-messaging 'Blocked' --content-policy-config 'filtersConfig=[{type=HATE,inputStrength=HIGH,outputStrength=HIGH}]'",
|
||||
"NativeIaC": "```yaml\nResources:\n example_resource:\n Type: AWS::Bedrock::Guardrail\n Properties:\n Name: example_resource\n BlockedInputMessaging: \"Blocked\"\n BlockedOutputsMessaging: \"Blocked\"\n ContentPolicyConfig:\n FiltersConfig:\n - Type: HATE\n InputStrength: HIGH # Critical: configures content filtering\n OutputStrength: HIGH\n```",
|
||||
"Other": "1. Open the AWS Console and go to Amazon Bedrock\n2. Select **Guardrails** from the navigation pane\n3. Click **Create guardrail**\n4. Configure content filters for harmful categories\n5. Set input and output messaging for blocked content\n6. Click **Create guardrail**",
|
||||
"Terraform": "```hcl\nresource \"aws_bedrock_guardrail\" \"example_resource\" {\n name = \"example_resource\"\n blocked_input_messaging = \"Blocked\"\n blocked_outputs_messaging = \"Blocked\"\n\n content_policy_config {\n filters_config {\n type = \"HATE\" # Critical: configures content filtering\n input_strength = \"HIGH\"\n output_strength = \"HIGH\"\n }\n }\n}\n```"
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Create at least one **Bedrock guardrail** in each region where Bedrock is used, then separately ensure those guardrails are attached to relevant agents and invocation paths.\n- Configure **content filters** for harmful categories (hate, violence, sexual, misconduct)\n- Add **sensitive information filters** and **denied topic policies**\n- Apply guardrails at the API call level using `guardrailIdentifier` where supported",
|
||||
"Url": "https://hub.prowler.com/check/bedrock_guardrails_configured"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"gen-ai"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [
|
||||
"bedrock_guardrail_prompt_attack_filter_enabled",
|
||||
"bedrock_guardrail_sensitive_information_filter_enabled",
|
||||
"bedrock_agent_guardrail_enabled"
|
||||
],
|
||||
"Notes": "This check validates guardrail existence per successfully scanned region. It does not verify attachment to agents or the use of guardrails on model invocations. Regions where Bedrock guardrails cannot be enumerated are skipped to avoid false failures."
|
||||
}
|
||||
@@ -0,0 +1,50 @@
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.bedrock.bedrock_client import bedrock_client
|
||||
|
||||
|
||||
class bedrock_guardrails_configured(Check):
|
||||
"""Ensure Bedrock guardrails are configured in successfully scanned regions.
|
||||
|
||||
This check verifies that at least one Amazon Bedrock guardrail is configured
|
||||
in each successfully scanned region.
|
||||
- PASS: At least one Bedrock guardrail is configured in the region.
|
||||
- FAIL: No Bedrock guardrails are configured in the region.
|
||||
"""
|
||||
|
||||
def execute(self) -> list[Check_Report_AWS]:
|
||||
"""Execute the check logic.
|
||||
|
||||
Returns:
|
||||
A list of reports containing the result of the check.
|
||||
"""
|
||||
findings = []
|
||||
for region in sorted(bedrock_client.guardrails_scanned_regions):
|
||||
regional_guardrails = sorted(
|
||||
(
|
||||
guardrail
|
||||
for guardrail in bedrock_client.guardrails.values()
|
||||
if guardrail.region == region
|
||||
),
|
||||
key=lambda guardrail: guardrail.name,
|
||||
)
|
||||
|
||||
if regional_guardrails:
|
||||
for guardrail in regional_guardrails:
|
||||
report = Check_Report_AWS(
|
||||
metadata=self.metadata(), resource=guardrail
|
||||
)
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"Bedrock guardrail {guardrail.name} is available in region {region}. This does not confirm that the guardrail is attached to agents or used on model invocations."
|
||||
findings.append(report)
|
||||
else:
|
||||
report = Check_Report_AWS(metadata=self.metadata(), resource={})
|
||||
report.region = region
|
||||
report.resource_id = "bedrock-guardrails"
|
||||
report.resource_arn = f"arn:{bedrock_client.audited_partition}:bedrock:{region}:{bedrock_client.audited_account}:guardrails"
|
||||
report.status = "FAIL"
|
||||
report.status_extended = (
|
||||
f"Bedrock has no guardrails configured in region {region}."
|
||||
)
|
||||
findings.append(report)
|
||||
|
||||
return findings
|
||||
@@ -1,5 +1,6 @@
from typing import Optional

from botocore.exceptions import ClientError
from pydantic.v1 import BaseModel

from prowler.lib.logger import logger
@@ -13,6 +14,8 @@ class Bedrock(AWSService):
super().__init__(__class__.__name__, provider)
self.logging_configurations = {}
self.guardrails = {}
self.guardrails_scanned_regions = set()
self.guardrails_scan_errors = {}
self.__threading_call__(self._get_model_invocation_logging_configuration)
self.__threading_call__(self._list_guardrails)
self.__threading_call__(self._get_guardrail, self.guardrails.values())
@@ -67,7 +70,18 @@ class Bedrock(AWSService):
arn=guardrail["arn"],
region=regional_client.region,
)
self.guardrails_scanned_regions.add(regional_client.region)
except ClientError as error:
self.guardrails_scan_errors[regional_client.region] = error.response[
"Error"
].get("Code", error.__class__.__name__)
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
self.guardrails_scan_errors[regional_client.region] = (
error.__class__.__name__
)
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
@@ -1,4 +1,5 @@
import datetime
from concurrent.futures import as_completed
from typing import List, Optional

from pydantic.v1 import BaseModel
@@ -14,9 +15,9 @@ class Codebuild(AWSService):
super().__init__(__class__.__name__, provider)
self.projects = {}
self.__threading_call__(self._list_projects)
self.__threading_call__(self._list_builds_for_project, self.projects.values())
self.__threading_call__(self._batch_get_builds, self.projects.values())
self.__threading_call__(self._batch_get_projects, self.projects.values())
self.__threading_call__(self._list_builds_for_project)
self.__threading_call__(self._batch_get_builds)
self.__threading_call__(self._batch_get_projects)
self.report_groups = {}
self.__threading_call__(self._list_report_groups)
self.__threading_call__(
@@ -44,10 +45,8 @@ class Codebuild(AWSService):
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def _list_builds_for_project(self, project):
logger.info("Codebuild - Listing builds...")
def _fetch_project_last_build(self, regional_client, project):
try:
regional_client = self.regional_clients[project.region]
build_ids = regional_client.list_builds_for_project(
projectName=project.name
).get("ids", [])
@@ -58,28 +57,99 @@ class Codebuild(AWSService):
f"{project.region}: {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def _batch_get_builds(self, project):
logger.info("Codebuild - Getting builds...")
def _list_builds_for_project(self, regional_client):
logger.info("Codebuild - Listing builds...")
try:
if project.last_build and project.last_build.id:
regional_client = self.regional_clients[project.region]
builds_by_id = regional_client.batch_get_builds(
ids=[project.last_build.id]
).get("builds", [])
if len(builds_by_id) > 0:
project.last_invoked_time = builds_by_id[0].get("endTime")
regional_projects = [
project
for project in self.projects.values()
if project.region == regional_client.region
]

# list_builds_for_project has no batch API equivalent, so reuse the
# shared thread pool to issue per-project calls in parallel within
# this region — preserving the wall-clock performance of the
# previous implementation.
futures = [
self.thread_pool.submit(
self._fetch_project_last_build, regional_client, project
)
for project in regional_projects
]
for future in as_completed(futures):
try:
future.result()
except Exception:
pass
except Exception as error:
logger.error(
f"{regional_client.region}: {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def _batch_get_projects(self, project):
def _batch_get_builds(self, regional_client):
logger.info("Codebuild - Getting builds...")
try:
# Collect all build IDs for this region
build_id_to_project = {}
for project in self.projects.values():
if (
project.region == regional_client.region
and project.last_build
and project.last_build.id
):
build_id_to_project[project.last_build.id] = project

if not build_id_to_project:
return

build_ids = list(build_id_to_project.keys())

# batch_get_builds supports up to 100 IDs per call
for i in range(0, len(build_ids), 100):
batch = build_ids[i : i + 100]
response = regional_client.batch_get_builds(ids=batch)
for build_info in response.get("builds", []):
build_id = build_info.get("id")
if build_id in build_id_to_project:
end_time = build_info.get("endTime")
if end_time:
build_id_to_project[build_id].last_invoked_time = end_time
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def _batch_get_projects(self, regional_client):
logger.info("Codebuild - Getting projects...")
try:
regional_client = self.regional_clients[project.region]
project_info = regional_client.batch_get_projects(names=[project.name])[
"projects"
][0]
# Collect all project names for this region
regional_projects = {
arn: project
for arn, project in self.projects.items()
if project.region == regional_client.region
}
if not regional_projects:
return

project_names = [project.name for project in regional_projects.values()]

# batch_get_projects supports up to 100 names per call
for i in range(0, len(project_names), 100):
batch = project_names[i : i + 100]
response = regional_client.batch_get_projects(names=batch)
for project_info in response.get("projects", []):
project_arn = project_info.get("arn")
if project_arn in regional_projects:
self._parse_project_info(
regional_projects[project_arn], project_info
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def _parse_project_info(self, project, project_info):
try:
project.buildspec = project_info["source"].get("buildspec")
if project_info["source"]["type"] != "NO_SOURCE":
project.source = Source(
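The hunk above batches lookups because the CodeBuild batch APIs cap each request at 100 items. As a standalone, hedged sketch of that chunking pattern outside the service class (the client, region and build IDs below are placeholders, not values from the diff):

```python
import boto3

codebuild = boto3.client("codebuild", region_name="us-east-1")

# Hypothetical list of build IDs gathered elsewhere.
build_ids = [f"example-project:build-{n}" for n in range(250)]

last_end_times = {}
# batch_get_builds accepts at most 100 IDs per call, so walk the list in chunks.
for i in range(0, len(build_ids), 100):
    chunk = build_ids[i : i + 100]
    for build in codebuild.batch_get_builds(ids=chunk).get("builds", []):
        last_end_times[build["id"]] = build.get("endTime")
```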
@@ -9,15 +9,13 @@ from prowler.providers.aws.lib.service.service import AWSService

class GlobalAccelerator(AWSService):
def __init__(self, provider):
# Call AWSService's __init__
super().__init__(__class__.__name__, provider)
# Global Accelerator is a global service that supports endpoints in multiple AWS Regions
# but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators.
# That is, for example, specify --region us-west-2 on AWS CLI commands.
region = "us-west-2" if provider.identity.partition == "aws" else None
super().__init__(__class__.__name__, provider, region=region)
self.accelerators = {}
if self.audited_partition == "aws":
# Global Accelerator is a global service that supports endpoints in multiple AWS Regions
# but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators.
# That is, for example, specify --region us-west-2 on AWS CLI commands.
self.region = "us-west-2"
self.client = self.session.client(self.service, self.region)
self._list_accelerators()
self.__threading_call__(self._list_tags, self.accelerators.values())
+6
-5
@@ -1,7 +1,7 @@
{
"Provider": "aws",
"CheckID": "route53_dangling_ip_subdomain_takeover",
"CheckTitle": "Route53 A record does not point to a dangling IP address",
"CheckTitle": "Route53 record does not point to a dangling AWS resource",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
"TTPs/Initial Access",
@@ -13,13 +13,14 @@
"Severity": "high",
"ResourceType": "AwsRoute53HostedZone",
"ResourceGroup": "network",
"Description": "**Route 53 `A` records** (non-alias) that use literal IPs are evaluated for **public AWS addresses** not currently assigned to resources in the account. Entries that match AWS ranges yet lack ownership are identified as potential **dangling IP targets**.",
"Risk": "**Dangling DNS `A` records** pointing to released AWS IPs enable **subdomain takeover**. An attacker who later obtains that IP can:\n- Redirect or alter content (integrity)\n- Capture credentials/cookies (confidentiality)\n- Disrupt or impersonate services (availability)",
"Description": "**Route 53 records** are evaluated for two **subdomain takeover** vectors: (1) non-alias **`A` records** using literal IPs in **public AWS ranges** that are not assigned to resources in the account (released EIPs/ENI public IPs); and (2) non-alias **`CNAME` records** targeting an **S3 website endpoint** (`*.s3-website[.-]<region>.amazonaws.com`) whose bucket no longer exists in the account.",
"Risk": "**Dangling DNS records** pointing to released AWS resources enable **subdomain takeover**. An attacker who later claims the IP — or registers an S3 bucket with the same name in any AWS account — can:\n- Redirect or alter content (integrity)\n- Capture credentials/cookies (confidentiality)\n- Disrupt or impersonate services (availability)",
"RelatedUrl": "",
"AdditionalURLs": [
"https://support.icompaas.com/support/solutions/articles/62000233461-ensure-route53-records-contains-dangling-ips-",
"https://www.trendmicro.com/trendaivisiononecloudriskmanagement/knowledge-base/aws/Route53/dangling-dns-records.html",
"https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-deleting.html"
"https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-deleting.html",
"https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteEndpoints.html"
],
"Remediation": {
"Code": {
@@ -29,7 +30,7 @@
"Terraform": "```hcl\n# Terraform: convert A record to Alias to avoid dangling public IPs\nresource \"aws_route53_record\" \"<example_resource_name>\" {\n zone_id = \"<example_resource_id>\"\n name = \"<example_resource_name>\"\n type = \"A\"\n\n alias { # CRITICAL: Alias to AWS resource (no direct IP)\n name = \"<ALIAS_TARGET_DNS_NAME>\" # e.g., dualstack.<alb>.amazonaws.com\n zone_id = \"<ALIAS_TARGET_HOSTED_ZONE_ID>\"\n evaluate_target_health = false\n }\n}\n```"
},
"Recommendation": {
"Text": "Remove or update any record that points to an unassigned IP. Avoid hard-coding AWS public IPs in `A` records; use **aliases/CNAMEs** to managed endpoints. Enforce **asset lifecycle** decommissioning, routine DNS-asset reconciliation, and **change control** with monitoring to prevent and detect drift.",
"Text": "Remove or update any record that points to an unowned AWS resource: unassigned public IPs in `A` records and S3 website endpoints in `CNAME` records whose bucket has been deleted. Avoid hard-coding AWS public IPs in `A` records; prefer **aliases** to managed endpoints (ALB, CloudFront, S3) and delete CNAMEs as soon as the backing bucket is removed. Enforce **asset lifecycle** decommissioning, routine DNS-asset reconciliation, and **change control** with monitoring to prevent and detect drift.",
"Url": "https://hub.prowler.com/check/route53_dangling_ip_subdomain_takeover"
}
},
+43
-11
@@ -1,3 +1,4 @@
import re
from ipaddress import ip_address

import awsipranges
@@ -6,6 +7,14 @@ from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.lib.utils.utils import validate_ip_address
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.route53.route53_client import route53_client
from prowler.providers.aws.services.s3.s3_client import s3_client

# S3 website endpoint formats:
# <bucket>.s3-website-<region>.amazonaws.com (legacy, dash)
# <bucket>.s3-website.<region>.amazonaws.com (newer, dot)
S3_WEBSITE_ENDPOINT_REGEX = re.compile(
r"^(?P<bucket>[^.]+(?:\.[^.]+)*)\.s3-website[.-](?P<region>[a-z0-9-]+)\.amazonaws\.com\.?$"
)


class route53_dangling_ip_subdomain_takeover(Check):
@@ -24,11 +33,14 @@ class route53_dangling_ip_subdomain_takeover(Check):
if ni.association and ni.association.get("PublicIp"):
public_ips.append(ni.association.get("PublicIp"))

owned_bucket_names = {bucket.name for bucket in s3_client.buckets.values()}

for record_set in route53_client.record_sets:
# Check only A records and avoid aliases (only need to check IPs not AWS Resources)
hosted_zone = route53_client.hosted_zones[record_set.hosted_zone_id]

# A records: dangling-IP path (released EIPs / unowned AWS IPs)
if record_set.type == "A" and not record_set.is_alias:
for record in record_set.records:
# Check if record is an IP Address
if validate_ip_address(record):
report = Check_Report_AWS(
metadata=self.metadata(), resource=record_set
@@ -36,25 +48,45 @@ class route53_dangling_ip_subdomain_takeover(Check):
report.resource_id = (
f"{record_set.hosted_zone_id}/{record_set.name}/{record}"
)
report.resource_arn = route53_client.hosted_zones[
record_set.hosted_zone_id
].arn
report.resource_tags = route53_client.hosted_zones[
record_set.hosted_zone_id
].tags
report.resource_arn = hosted_zone.arn
report.resource_tags = hosted_zone.tags
report.status = "PASS"
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} is not a dangling IP."
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {hosted_zone.name} is not a dangling IP."
# If Public IP check if it is in the AWS Account
if (
not ip_address(record).is_private
and record not in public_ips
):
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} does not belong to AWS and it is not a dangling IP."
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {hosted_zone.name} does not belong to AWS and it is not a dangling IP."
# Check if potential dangling IP is within AWS Ranges
aws_ip_ranges = awsipranges.get_ranges()
if aws_ip_ranges.get(record):
report.status = "FAIL"
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} is a dangling IP which can lead to a subdomain takeover attack."
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {hosted_zone.name} is a dangling IP which can lead to a subdomain takeover attack."
findings.append(report)

# CNAME records: dangling S3 website endpoint
# (deleted bucket whose name can be re-registered by anyone)
elif record_set.type == "CNAME" and not record_set.is_alias:
for record in record_set.records:
match = S3_WEBSITE_ENDPOINT_REGEX.match(record.lower())
if not match:
continue
bucket_name = match.group("bucket")
report = Check_Report_AWS(
metadata=self.metadata(), resource=record_set
)
report.resource_id = (
f"{record_set.hosted_zone_id}/{record_set.name}/{record}"
)
report.resource_arn = hosted_zone.arn
report.resource_tags = hosted_zone.tags
if bucket_name in owned_bucket_names:
report.status = "PASS"
report.status_extended = f"Route53 CNAME {record_set.name} in Hosted Zone {hosted_zone.name} points to S3 website endpoint of bucket {bucket_name} which exists in the account."
else:
report.status = "FAIL"
report.status_extended = f"Route53 CNAME {record_set.name} in Hosted Zone {hosted_zone.name} points to S3 website endpoint of bucket {bucket_name} which does not exist in the account and can lead to a subdomain takeover attack."
findings.append(report)

return findings
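The regex above accepts both S3 website endpoint shapes named in the comment. A small, hedged sketch of how it behaves in isolation (the hostnames below are made-up examples, not data from the check):

```python
import re

# Same pattern as route53_dangling_ip_subdomain_takeover, copied here for a standalone demo.
S3_WEBSITE_ENDPOINT_REGEX = re.compile(
    r"^(?P<bucket>[^.]+(?:\.[^.]+)*)\.s3-website[.-](?P<region>[a-z0-9-]+)\.amazonaws\.com\.?$"
)

samples = [
    "assets.example.com.s3-website-us-east-1.amazonaws.com",   # legacy dash form
    "assets.example.com.s3-website.eu-west-1.amazonaws.com.",  # newer dot form, trailing dot
    "d111111abcdef8.cloudfront.net",                            # not an S3 website endpoint
]
for target in samples:
    match = S3_WEBSITE_ENDPOINT_REGEX.match(target.lower())
    if match:
        print(target, "->", match.group("bucket"), match.group("region"))
    else:
        print(target, "-> no match")
```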
@@ -176,14 +176,12 @@ class RecordSet(BaseModel):

class Route53Domains(AWSService):
def __init__(self, provider):
# Call AWSService's __init__
super().__init__(__class__.__name__, provider)
# Route53Domains is a global service that supports endpoints in multiple AWS Regions
# but you must specify the US East (N. Virginia) Region to create, update, or otherwise work with domains.
region = "us-east-1" if provider.identity.partition == "aws" else None
super().__init__(__class__.__name__, provider, region=region)
self.domains = {}
if self.audited_partition == "aws":
# Route53Domains is a global service that supports endpoints in multiple AWS Regions
# but you must specify the US East (N. Virginia) Region to create, update, or otherwise work with domains.
self.region = "us-east-1"
self.client = self.session.client(self.service, self.region)
self._list_domains()
self._get_domain_detail()
self._list_tags_for_domain()
@@ -9,20 +9,20 @@ from prowler.providers.aws.lib.service.service import AWSService

class TrustedAdvisor(AWSService):
def __init__(self, provider):
# Call AWSService's __init__
super().__init__("support", provider)
# Support API is not available in China Partition
# But only in us-east-1 or us-gov-west-1 https://docs.aws.amazon.com/general/latest/gr/awssupport.html
partition = provider.identity.partition
if partition == "aws":
support_region = "us-east-1"
elif partition == "aws-cn":
support_region = None
else:
support_region = "us-gov-west-1"
super().__init__("support", provider, region=support_region)
self.account_arn_template = f"arn:{self.audited_partition}:trusted-advisor:{self.region}:{self.audited_account}:account"
self.checks = []
self.premium_support = PremiumSupport(enabled=False)
# Support API is not available in China Partition
# But only in us-east-1 or us-gov-west-1 https://docs.aws.amazon.com/general/latest/gr/awssupport.html
if self.audited_partition != "aws-cn":
if self.audited_partition == "aws":
support_region = "us-east-1"
else:
support_region = "us-gov-west-1"
self.client = self.session.client(self.service, region_name=support_region)
self.client.region = support_region
self._describe_services()
if getattr(self.premium_support, "enabled", False):
self._describe_trusted_advisor_checks()
@@ -34,13 +34,13 @@ class TrustedAdvisor(AWSService):
for check in self.client.describe_trusted_advisor_checks(language="en").get(
"checks", []
):
check_arn = f"arn:{self.audited_partition}:trusted-advisor:{self.client.region}:{self.audited_account}:check/{check['id']}"
check_arn = f"arn:{self.audited_partition}:trusted-advisor:{self.region}:{self.audited_account}:check/{check['id']}"
self.checks.append(
Check(
id=check["id"],
name=check["name"],
arn=check_arn,
region=self.client.region,
region=self.region,
)
)
except ClientError as error:
@@ -50,22 +50,22 @@ class TrustedAdvisor(AWSService):
== "Amazon Web Services Premium Support Subscription is required to use this service."
):
logger.warning(
f"{self.client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{self.client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{self.client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def _describe_trusted_advisor_check_result(self):
logger.info("TrustedAdvisor - Describing Check Result...")
try:
for check in self.checks:
if check.region == self.client.region:
if check.region == self.region:
try:
response = self.client.describe_trusted_advisor_check_result(
checkId=check.id
@@ -78,11 +78,11 @@ class TrustedAdvisor(AWSService):
== "InvalidParameterValueException"
):
logger.warning(
f"{self.client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{self.client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

def _describe_services(self):
@@ -9,15 +9,13 @@ from prowler.providers.aws.lib.service.service import AWSService

class WAF(AWSService):
def __init__(self, provider):
# Call AWSService's __init__
super().__init__("waf", provider)
# AWS WAF is available globally for CloudFront distributions, but you must use the Region US East (N. Virginia) to create your web ACL and any resources used in the web ACL, such as rule groups, IP sets, and regex pattern sets.
region = "us-east-1" if provider.identity.partition == "aws" else None
super().__init__("waf", provider, region=region)
self.rules = {}
self.rule_groups = {}
self.web_acls = {}
if self.audited_partition == "aws":
# AWS WAF is available globally for CloudFront distributions, but you must use the Region US East (N. Virginia) to create your web ACL and any resources used in the web ACL, such as rule groups, IP sets, and regex pattern sets.
self.region = "us-east-1"
self.client = self.session.client(self.service, self.region)
self._list_rules()
self.__threading_call__(self._get_rule, self.rules.values())
self._list_rule_groups()
@@ -11,13 +11,11 @@ from prowler.providers.aws.lib.service.service import AWSService

class WAFv2(AWSService):
def __init__(self, provider):
# Call AWSService's __init__
super().__init__(__class__.__name__, provider)
# AWS WAFv2 is available globally for CloudFront distributions, but you must use the Region US East (N. Virginia) to create your web ACL.
region = "us-east-1" if provider.identity.partition == "aws" else None
super().__init__(__class__.__name__, provider, region=region)
self.web_acls = {}
if self.audited_partition == "aws":
# AWS WAFv2 is available globally for CloudFront distributions, but you must use the Region US East (N. Virginia) to create your web ACL.
self.region = "us-east-1"
self.client = self.session.client(self.service, self.region)
self._list_web_acls_global()
self.__threading_call__(self._list_web_acls_regional)
self.__threading_call__(self._get_web_acl, self.web_acls.values())
+1
-1
@@ -13,7 +13,7 @@
"Risk": "Allowing `TLS 1.0/1.1` enables protocol downgrades and weak cipher negotiation, exposing HTTPS traffic to **MITM** interception, credential theft, and tampering. This undermines the **confidentiality** and **integrity** of sessions and data in transit, and can enable account takeover via stolen tokens.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/+azure/app-service/overview-tls",
"https://learn.microsoft.com/en-us/azure/app-service/overview-tls",
"https://learn.microsoft.com/en-us/azure/app-service/configure-ssl-bindings#enforce-tls-versions",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/AppService/latest-version-of-tls-encryption-in-use.html",
"https://icompaas.freshdesk.com/support/solutions/articles/62000234773-ensure-that-minimum-tls-version-is-set-to-tls-v1-2-or-higher"
+8
-8
@@ -9,8 +9,8 @@
"Severity": "high",
"ResourceType": "microsoft.network/networkwatchers",
"ResourceGroup": "network",
"Description": "**Azure Network Watcher** has **NSG flow logs** enabled and configured to forward traffic records to a centralized **Log Analytics workspace**",
"Risk": "Missing or disabled flow logging blinds visibility into network behavior, hindering detection of:\n- **Lateral movement** and internal scanning\n- **C2 beacons** and exfiltration patterns\nThis degrades incident response and correlation, impacting **confidentiality** and **integrity**.",
"Description": "**Azure Network Watcher** has **flow logs** enabled for supported targets, such as **virtual networks** and **network security groups**, and configured with **Traffic Analytics** to forward records to a centralized **Log Analytics workspace**",
"Risk": "Missing, disabled, or non-centralized flow logging blinds visibility into network behavior, hindering detection of:\n- **Lateral movement** and internal scanning\n- **C2 beacons** and exfiltration patterns\nThis degrades incident response and correlation, impacting **confidentiality** and **integrity**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/azure/network-watcher/vnet-flow-logs-tutorial",
@@ -18,13 +18,13 @@
],
"Remediation": {
"Code": {
"CLI": "az network watcher flow-log create --location <REGION> --name <FLOW_LOG_NAME> --resource-group <RESOURCE_GROUP> --nsg <NSG_NAME> --storage-account <STORAGE_ACCOUNT_NAME> --enabled true --workspace <LOG_ANALYTICS_WORKSPACE_ID>",
"NativeIaC": "```bicep\n// Enable NSG flow logs and send to Log Analytics\nresource flowLog 'Microsoft.Network/networkWatchers/flowLogs@2022-09-01' = {\n name: '<example_resource_name>/<example_resource_name>'\n location: '<REGION>'\n properties: {\n enabled: true // CRITICAL: turns on flow logs\n targetResourceId: '<example_resource_id>' // NSG resource ID\n storageId: '<example_resource_id>' // required for NSG flow logs\n flowAnalyticsConfiguration: {\n networkWatcherFlowAnalyticsConfiguration: {\n enabled: true // CRITICAL: sends flow logs to Log Analytics\n workspaceResourceId: '<example_resource_id>' // Log Analytics workspace resource ID\n }\n }\n }\n}\n```",
"Other": "1. In Azure portal, go to Network Watcher > Flow logs\n2. Click + Create (or Create flow log)\n3. Select the target NSG and region\n4. Set Status to On\n5. Select a Storage account\n6. Enable Traffic analytics, then select your Log Analytics workspace\n7. Click Review + create, then Create",
"Terraform": "```hcl\n# Enable NSG flow logs and send to Log Analytics\nresource \"azurerm_network_watcher_flow_log\" \"<example_resource_name>\" {\n network_watcher_name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n network_security_group_id = \"<example_resource_id>\"\n storage_account_id = \"<example_resource_id>\"\n\n enabled = true # CRITICAL: turns on flow logs\n\n traffic_analytics { \n enabled = true # CRITICAL: sends flow logs to Log Analytics\n workspace_id = \"<example_resource_id>\" # workspace_id (GUID) or use data source\n workspace_region = \"<REGION>\"\n workspace_resource_id = \"<example_resource_id>\" # Log Analytics workspace resource ID\n }\n}\n```"
"CLI": "az network watcher flow-log create --location <REGION> --name <FLOW_LOG_NAME> --resource-group <RESOURCE_GROUP> --target-resource-id <TARGET_RESOURCE_ID> --storage-account <STORAGE_ACCOUNT_ID> --enabled true --workspace <LOG_ANALYTICS_WORKSPACE_ID>",
"NativeIaC": "```bicep\n// Enable flow logs for a supported target (for example, a virtual network or NSG)\nresource flowLog 'Microsoft.Network/networkWatchers/flowLogs@2023-09-01' = {\n name: '<example_network_watcher_name>/<example_flow_log_name>'\n location: '<REGION>'\n properties: {\n enabled: true\n targetResourceId: '<example_target_resource_id>'\n storageId: '<example_storage_account_id>'\n flowAnalyticsConfiguration: {\n networkWatcherFlowAnalyticsConfiguration: {\n enabled: true\n workspaceResourceId: '<example_log_analytics_workspace_id>'\n }\n }\n }\n}\n```",
"Other": "1. In Azure portal, go to Network Watcher > Flow logs\n2. Click + Create\n3. Select the subscription and region\n4. Choose the appropriate flow log type and target resource, such as a virtual network or network security group\n5. Set Status to On\n6. Select a Storage account\n7. Enable Traffic analytics and select the Log Analytics workspace\n8. Click Review + create, then Create",
"Terraform": "```hcl\n# Enable flow logs for a supported target and send analytics to Log Analytics\nresource \"azurerm_network_watcher_flow_log\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n network_watcher_name = \"<example_network_watcher_name>\"\n resource_group_name = \"<example_resource_group_name>\"\n target_resource_id = \"<example_target_resource_id>\"\n storage_account_id = \"<example_storage_account_id>\"\n\n enabled = true\n\n traffic_analytics {\n enabled = true\n workspace_id = \"<example_workspace_id>\"\n workspace_region = \"<REGION>\"\n workspace_resource_id = \"<example_log_analytics_workspace_id>\"\n }\n}\n```"
},
"Recommendation": {
"Text": "Enable and centrally aggregate **NSG flow logs** to a **Log Analytics workspace**.\n\n- Enforce least privilege on log data\n- Define retention and secure storage\n- Use layered monitoring (e.g., Traffic Analytics)\n- Ensure coverage across regions/subscriptions and critical NSGs",
"Text": "Enable and centrally aggregate **flow logs** for supported Network Watcher targets, including **virtual networks** and **network security groups**, to a **Log Analytics workspace**.\n\n- Enforce least privilege on log data\n- Define retention and secure storage\n- Use layered monitoring (e.g., Traffic Analytics)\n- Ensure coverage across regions, subscriptions, and critical network segments",
"Url": "https://hub.prowler.com/check/network_flow_log_captured_sent"
}
},
@@ -34,5 +34,5 @@
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor."
"Notes": "Configuring flow logs and Traffic Analytics increases storage and analytics costs. For new Azure deployments, prefer virtual network flow logs where they satisfy your monitoring requirements because NSG flow logs are on the retirement path."
}
+18
-8
@@ -11,16 +11,26 @@ class network_flow_log_captured_sent(Check):
metadata=self.metadata(), resource=network_watcher
)
report.subscription = subscription
report.status = "FAIL"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has no flow logs"
if network_watcher.flow_logs:
report.status = "FAIL"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has flow logs disabled"
report.status = "PASS"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has flow logs that are captured and sent to Log Analytics workspace"
has_failed = False
for flow_log in network_watcher.flow_logs:
if flow_log.enabled:
report.status = "PASS"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has flow logs that are captured and sent to Log Analytics workspace"
break
if not has_failed:
if not flow_log.enabled:
report.status = "FAIL"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has flow logs disabled"
has_failed = True
elif not (
flow_log.traffic_analytics_enabled
and flow_log.workspace_resource_id
):
report.status = "FAIL"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has enabled flow logs that are not configured to send traffic analytics to a Log Analytics workspace"
has_failed = True
else:
report.status = "FAIL"
report.status_extended = f"Network Watcher {network_watcher.name} from subscription {subscription} has no flow logs"

findings.append(report)
+8
-8
@@ -9,8 +9,8 @@
"Severity": "medium",
"ResourceType": "microsoft.network/networkwatchers",
"ResourceGroup": "network",
"Description": "**Azure Network Watcher** has **NSG flow logs** enabled and configured to retain for at least `90` days (or `0` for unlimited). The evaluation checks that flow logging is enabled and that the retention policy meets the required duration for each configured log.",
"Risk": "Absent or short-retained **NSG flow logs** reduce visibility into IP flows, delaying detection of port scans, brute force, data exfiltration, and lateral movement.\n\nForensics and accountability degrade, threatening **confidentiality** and **integrity**.",
"Description": "**Azure Network Watcher** has **flow logs** enabled for supported targets, such as **virtual networks** and **network security groups**, and configured to retain for at least `90` days (or `0` for unlimited). The evaluation checks that flow logging is enabled and that the retention policy meets the required duration for each configured log.",
"Risk": "Absent or short-retained **flow logs** reduce visibility into IP flows, delaying detection of port scans, brute force, data exfiltration, and lateral movement.\n\nForensics and accountability degrade, threatening **confidentiality** and **integrity**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest",
@@ -20,13 +20,13 @@
],
"Remediation": {
"Code": {
"CLI": "az network watcher flow-log create --location <LOCATION> --name <example_resource_name> --nsg <example_resource_id> --storage-account <example_resource_id> --retention 90",
"NativeIaC": "```bicep\n// Enable NSG flow logs with retention >= 90 days\nresource flowlog 'Microsoft.Network/networkWatchers/flowLogs@2023-09-01' = {\n name: '<example_resource_name>/<example_resource_name>'\n location: '<LOCATION>'\n properties: {\n targetResourceId: '<example_resource_id>'\n storageId: '<example_resource_id>'\n enabled: true // critical: turns on flow logs\n retentionPolicy: {\n enabled: true // critical: activates retention policy\n days: 90 // critical: 0 (unlimited) or >= 90 to pass\n }\n }\n}\n```",
"Other": "1. In Azure Portal, go to Network Watcher > NSG flow logs\n2. Select the NSG to configure\n3. Set Status to On\n4. Set Retention (days) to 0 (unlimited) or at least 90\n5. Select a Storage account\n6. Click Save",
"Terraform": "```hcl\n# Enable NSG flow logs with retention >= 90 days\nresource \"azurerm_network_watcher_flow_log\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n network_watcher_name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n target_resource_id = \"<example_resource_id>\"\n storage_account_id = \"<example_resource_id>\"\n\n enabled = true # critical: turns on flow logs\n\n retention_policy {\n enabled = true # critical: activates retention policy\n days = 90 # critical: 0 (unlimited) or >= 90 to pass\n }\n}\n```"
"CLI": "az network watcher flow-log create --location <LOCATION> --name <example_flow_log_name> --target-resource-id <example_target_resource_id> --storage-account <example_storage_account_id> --enabled true --retention 90",
"NativeIaC": "```bicep\n// Enable flow logs with retention >= 90 days for a supported target\nresource flowlog 'Microsoft.Network/networkWatchers/flowLogs@2023-09-01' = {\n name: '<example_network_watcher_name>/<example_flow_log_name>'\n location: '<LOCATION>'\n properties: {\n targetResourceId: '<example_target_resource_id>'\n storageId: '<example_storage_account_id>'\n enabled: true\n retentionPolicy: {\n enabled: true\n days: 90\n }\n }\n}\n```",
"Other": "1. In Azure Portal, go to Network Watcher > Flow logs\n2. Select the relevant flow log or create one for the target resource, such as a virtual network or network security group\n3. Set Status to On\n4. Set Retention (days) to 0 (unlimited) or at least 90\n5. Select a Storage account\n6. Click Save or Review + create",
"Terraform": "```hcl\n# Enable flow logs with retention >= 90 days\nresource \"azurerm_network_watcher_flow_log\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n network_watcher_name = \"<example_network_watcher_name>\"\n resource_group_name = \"<example_resource_group_name>\"\n target_resource_id = \"<example_target_resource_id>\"\n storage_account_id = \"<example_storage_account_id>\"\n\n enabled = true\n\n retention_policy {\n enabled = true\n days = 90\n }\n}\n```"
},
"Recommendation": {
"Text": "Enable **NSG flow logs** and keep retention `90` days (`0` for unlimited). Restrict and monitor access to logs, store immutably, and stream to a SIEM to detect anomalies. Apply **defense in depth** and **least privilege**. Plan migration to **Virtual network flow logs** as NSG flow logs are being retired.",
"Text": "Enable **flow logs** and keep retention `90` days (`0` for unlimited) for supported targets, including **virtual networks** and **network security groups**. Restrict and monitor access to logs, store immutably, and stream to a SIEM to detect anomalies. Apply **defense in depth** and **least privilege**. Prefer **virtual network flow logs** for new deployments as NSG flow logs are being retired.",
"Url": "https://hub.prowler.com/check/network_flow_log_more_than_90_days"
}
},
@@ -36,5 +36,5 @@
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use."
"Notes": "Longer retention improves investigation depth but increases storage cost. For new Azure deployments, prefer virtual network flow logs where they satisfy your monitoring requirements because NSG flow logs are on the retirement path."
}
@@ -79,6 +79,9 @@ class Network(AzureService):
id=flow_log.id,
name=flow_log.name,
enabled=flow_log.enabled,
target_resource_id=getattr(
flow_log, "target_resource_id", None
),
retention_policy=RetentionPolicy(
enabled=(
flow_log.retention_policy.enabled
@@ -91,6 +94,34 @@ class Network(AzureService):
else 0
),
),
traffic_analytics_enabled=bool(
getattr(
getattr(
getattr(
flow_log,
"flow_analytics_configuration",
None,
),
"network_watcher_flow_analytics_configuration",
None,
),
"enabled",
False,
)
),
workspace_resource_id=getattr(
getattr(
getattr(
flow_log,
"flow_analytics_configuration",
None,
),
"network_watcher_flow_analytics_configuration",
None,
),
"workspace_resource_id",
None,
),
)
for flow_log in flow_logs
],
@@ -192,6 +223,9 @@ class FlowLog:
name: str
enabled: bool
retention_policy: RetentionPolicy
target_resource_id: Optional[str] = None
traffic_analytics_enabled: bool = False
workspace_resource_id: Optional[str] = None


@dataclass
@@ -2,21 +2,51 @@

from __future__ import annotations

import ipaddress
import re
import socket
import time
from abc import ABC, abstractmethod
from urllib.parse import urlparse

import requests
import tldextract

from prowler.config.config import prowler_version
from prowler.lib.logger import logger
from prowler.providers.image.exceptions.exceptions import ImageRegistryNetworkError
from prowler.providers.image.exceptions.exceptions import (
ImageRegistryAuthError,
ImageRegistryNetworkError,
)

_MAX_RETRIES = 3
_BACKOFF_BASE = 1
_USER_AGENT = f"Prowler/{prowler_version} (registry-adapter)"

_NON_PUBLIC_IP_PROPERTIES = (
"is_private",
"is_loopback",
"is_link_local",
"is_multicast",
"is_reserved",
"is_unspecified",
)


def _ip_is_non_public(ip_str: str) -> bool:
try:
addr = ipaddress.ip_address(ip_str)
except ValueError:
return False
return any(getattr(addr, prop) for prop in _NON_PUBLIC_IP_PROPERTIES)


def _registrable_domain(host: str) -> str | None:
ext = tldextract.extract(host)
if not ext.domain or not ext.suffix:
return None
return f"{ext.domain}.{ext.suffix}"


class RegistryAdapter(ABC):
"""Abstract base class for registry adapters."""
@@ -68,6 +98,107 @@
"""Enumerate all tags for a repository."""
...

def _origin_url(self) -> str:
"""The URL whose host the validator compares against when enforce_origin=True.

Subclasses can override if the effective registry origin differs from
``registry_url`` (e.g., Docker Hub talks to ``registry-1.docker.io``).
"""
return self.registry_url

def _validate_outbound_url(
self,
url: str,
*,
enforce_origin: bool = True,
origin_url: str | None = None,
) -> str:
"""Validate a URL before it is passed to ``requests``.

Defenses against parser-mismatch SSRF (PRWLRHELP-2103):
- canonicalise via ``requests.PreparedRequest`` so validator and connector
parse the same string the same way;
- reject schemes other than http/https;
- reject literal non-public IPs (private, loopback, link-local, ...);
- reject hostnames whose A/AAAA records resolve to non-public IPs;
- when ``enforce_origin=True``, reject hosts that don't share the
registry's registrable domain.

Returns the canonical URL the caller should pass to ``requests``.
"""
parsed = urlparse(url)
if parsed.scheme not in ("http", "https"):
raise ImageRegistryAuthError(
file=__file__,
message=(
f"Disallowed URL scheme: {parsed.scheme!r}. Only http/https are allowed."
),
)

try:
prepared = requests.Request("GET", url).prepare()
except (
requests.exceptions.InvalidURL,
requests.exceptions.MissingSchema,
ValueError,
) as exc:
raise ImageRegistryAuthError(
file=__file__,
message=f"Malformed URL {url!r}: {exc}",
)

canonical_url = prepared.url
canonical = urlparse(canonical_url)
host = canonical.hostname or ""
if not host:
raise ImageRegistryAuthError(
file=__file__,
message=f"URL has no host: {canonical_url}",
)

try:
addr = ipaddress.ip_address(host)
except ValueError:
try:
infos = socket.getaddrinfo(host, None)
except socket.gaierror:
infos = []
for *_, sockaddr in infos:
resolved_ip = sockaddr[0]
if _ip_is_non_public(resolved_ip):
raise ImageRegistryAuthError(
file=__file__,
message=(
f"Host {host!r} resolves to non-public address {resolved_ip}. "
"This may indicate an SSRF attempt."
),
)
else:
if any(getattr(addr, prop) for prop in _NON_PUBLIC_IP_PROPERTIES):
raise ImageRegistryAuthError(
file=__file__,
message=(
f"URL targets a non-public address: {host}. "
"This may indicate an SSRF attempt."
),
)

if enforce_origin:
registry_host = urlparse(origin_url or self._origin_url()).hostname or ""
if registry_host and host != registry_host:
target_d = _registrable_domain(host)
registry_d = _registrable_domain(registry_host)
if not (target_d and registry_d and target_d == registry_d):
raise ImageRegistryAuthError(
file=__file__,
message=(
f"URL host {host!r} is unrelated to registry host "
f"{registry_host!r}; refusing to follow."
),
)

return canonical_url

def _request_with_retry(self, method: str, url: str, **kwargs) -> requests.Response:
context_label = kwargs.pop("context_label", None) or self.registry_url
kwargs.setdefault("timeout", 30)
@@ -131,16 +262,15 @@
original_exception=last_exception,
)

@staticmethod
def _next_page_url(resp: requests.Response) -> str | None:
def _next_page_url(self, resp: requests.Response) -> str | None:
link_header = resp.headers.get("Link", "")
if not link_header:
return None
match = re.search(r'<([^>]+)>;\s*rel="next"', link_header)
if match:
url = match.group(1)
if url.startswith("/"):
parsed = urlparse(resp.url)
return f"{parsed.scheme}://{parsed.netloc}{url}"
return url
return None
if not match:
return None
url = match.group(1)
if url.startswith("/"):
parsed = urlparse(resp.url)
url = f"{parsed.scheme}://{parsed.netloc}{url}"
return self._validate_outbound_url(url)
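The two module-level helpers added above carry most of the SSRF defenses listed in the docstring. A small, hedged sketch of the same logic in isolation (re-implemented here purely for illustration; the sample hosts are made up):

```python
import ipaddress

import tldextract

NON_PUBLIC = ("is_private", "is_loopback", "is_link_local", "is_multicast", "is_reserved", "is_unspecified")


def ip_is_non_public(ip_str: str) -> bool:
    # Literal IPs that are private, loopback, link-local, etc. are refused.
    try:
        addr = ipaddress.ip_address(ip_str)
    except ValueError:
        return False
    return any(getattr(addr, prop) for prop in NON_PUBLIC)


def registrable_domain(host: str) -> str | None:
    ext = tldextract.extract(host)
    return f"{ext.domain}.{ext.suffix}" if ext.domain and ext.suffix else None


print(ip_is_non_public("169.254.169.254"))          # True  -- link-local metadata endpoint, rejected
print(ip_is_non_public("8.8.8.8"))                  # False -- public address, allowed to proceed
print(registrable_domain("registry-1.docker.io"))   # "docker.io"
# A redirect to an unrelated host would not share the registry's registrable
# domain, so enforce_origin=True refuses to follow it.
print(registrable_domain("evil.attacker-domain.com") == registrable_domain("registry-1.docker.io"))  # False
```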
@@ -207,15 +207,14 @@ class DockerHubAdapter(RegistryAdapter):
message=f"Unexpected error during {context} on Docker Hub (HTTP {resp.status_code}): {resp.text[:200]}",
)

@staticmethod
def _next_tag_page_url(resp: requests.Response) -> str | None:
def _next_tag_page_url(self, resp: requests.Response) -> str | None:
link_header = resp.headers.get("Link", "")
if not link_header:
return None
match = re.search(r'<([^>]+)>;\s*rel="next"', link_header)
if match:
next_url = match.group(1)
if next_url.startswith("/"):
return f"{_REGISTRY_HOST}{next_url}"
return next_url
return None
if not match:
return None
next_url = match.group(1)
if next_url.startswith("/"):
next_url = f"{_REGISTRY_HOST}{next_url}"
return self._validate_outbound_url(next_url, origin_url=_REGISTRY_HOST)
@@ -3,7 +3,6 @@
from __future__ import annotations

import base64
import ipaddress
import re
from typing import TYPE_CHECKING
from urllib.parse import urlparse
@@ -43,6 +42,9 @@ class OciRegistryAdapter(RegistryAdapter):
url = f"https://{url}"
return url

def _origin_url(self) -> str:
return self._base_url

def list_repositories(self) -> list[str]:
self._ensure_auth()
repositories: list[str] = []
@@ -127,8 +129,9 @@ class OciRegistryAdapter(RegistryAdapter):
file=__file__,
message=f"Cannot parse token endpoint from registry {self.registry_url}. Www-Authenticate: {www_authenticate[:200]}",
)
realm = match.group(1)
self._validate_realm_url(realm)
realm = self._validate_outbound_url(match.group(1))
if urlparse(realm).scheme == "http":
logger.warning(f"Bearer token realm uses HTTP (not HTTPS): {realm}")
params: dict = {}
service_match = re.search(r'service="([^"]+)"', www_authenticate)
if service_match:
@@ -156,27 +159,6 @@ class OciRegistryAdapter(RegistryAdapter):
)
return token

@staticmethod
def _validate_realm_url(realm: str) -> None:
parsed = urlparse(realm)
if parsed.scheme not in ("http", "https"):
raise ImageRegistryAuthError(
file=__file__,
message=f"Bearer token realm has disallowed scheme: {parsed.scheme}. Only http/https are allowed.",
)
if parsed.scheme == "http":
logger.warning(f"Bearer token realm uses HTTP (not HTTPS): {realm}")
hostname = parsed.hostname or ""
try:
addr = ipaddress.ip_address(hostname)
if addr.is_private or addr.is_loopback or addr.is_link_local:
raise ImageRegistryAuthError(
file=__file__,
message=f"Bearer token realm points to a private/loopback address: {hostname}. This may indicate an SSRF attempt.",
)
except ValueError:
pass

def _resolve_basic_credentials(self) -> tuple[str | None, str | None]:
"""Decode pre-encoded base64 auth tokens (e.g., from aws ecr get-authorization-token).

@@ -206,14 +188,24 @@ class OciRegistryAdapter(RegistryAdapter):

def _do_authed_request(self, method: str, url: str, **kwargs) -> requests.Response:
headers = kwargs.pop("headers", {})
if self._bearer_token:
headers["Authorization"] = f"Bearer {self._bearer_token}"
elif self.username and self.password:
user, pwd = self._resolve_basic_credentials()
kwargs.setdefault("auth", (user, pwd))
if self._is_same_origin_as_registry(url):
if self._bearer_token:
headers["Authorization"] = f"Bearer {self._bearer_token}"
elif self.username and self.password:
user, pwd = self._resolve_basic_credentials()
kwargs.setdefault("auth", (user, pwd))
kwargs["headers"] = headers
return self._request_with_retry(method, url, **kwargs)

def _is_same_origin_as_registry(self, url: str) -> bool:
target = urlparse(url)
origin = urlparse(self._base_url)
return (
target.scheme == origin.scheme
and (target.hostname or "") == (origin.hostname or "")
and target.port == origin.port
)

def _check_response(self, resp: requests.Response, context: str) -> None:
if resp.status_code == 200:
return
+1
-1
@@ -95,7 +95,7 @@ maintainers = [{name = "Prowler Engineering", email = "engineering@prowler.com"}
name = "prowler"
readme = "README.md"
requires-python = ">=3.10,<3.13"
version = "5.25.0"
version = "5.26.0"

[project.scripts]
prowler = "prowler.__main__:prowler"
@@ -436,6 +436,33 @@ class Test_Config:
assert "csa_ccm_4.0" in aws_frameworks
assert "csa_ccm_4.0" not in kubernetes_frameworks

def test_get_available_compliance_frameworks_no_provider_includes_universals(self):
"""Regression test for the variable shadowing bug.

Previously, the inner ``for provider in providers`` loop shadowed
the outer ``provider`` parameter. When called without a provider,
the post-loop ``if provider:`` branch wrongly applied
``framework.supports_provider(<last provider iterated>)`` and
excluded universal frameworks from the result.

Result: the parser-level ``available_compliance_frameworks``
constant was missing universal frameworks like ``csa_ccm_4.0``,
which made ``--compliance csa_ccm_4.0`` reject the choice.
"""
all_frameworks = get_available_compliance_frameworks()
assert "csa_ccm_4.0" in all_frameworks

def test_get_available_compliance_frameworks_does_not_mutate_provider_param(self):
"""Calling with a specific provider must not affect a subsequent
call without provider. Validates that the loop variable rename
prevents leaking state between calls."""
# Force an iteration over multiple providers first
get_available_compliance_frameworks("kubernetes")
# Then a no-provider call must still include universals supported
# by ANY provider (not filtered by some leaked value)
all_frameworks = get_available_compliance_frameworks()
assert "csa_ccm_4.0" in all_frameworks

def test_load_and_validate_config_file_aws(self):
path = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
config_test_file = f"{path}/fixtures/config.yaml"
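The regression described in the docstrings above is a plain variable-shadowing bug. A simplified, hedged illustration of the pattern and of the fix (this is not the actual checks_loader code, just the shape of the defect):

```python
def buggy(provider=None, providers=("aws", "azure", "gcp")):
    # BUG: the loop variable reuses the name of the `provider` parameter,
    # so after the loop `provider` is always the last item iterated.
    results = []
    for provider in providers:
        results.append(provider)
    if provider:  # truthy even when the caller passed provider=None
        results = [r for r in results if r == provider]
    return results


def fixed(provider=None, providers=("aws", "azure", "gcp")):
    results = []
    for candidate in providers:  # renamed loop variable, parameter left intact
        results.append(candidate)
    if provider:
        results = [r for r in results if r == provider]
    return results


print(buggy())   # ['gcp'] -- entries wrongly filtered by the leaked loop value
print(fixed())   # ['aws', 'azure', 'gcp']
```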
@@ -675,3 +675,177 @@ class TestCheckLoader:
)
assert CLOUDTRAIL_THREAT_DETECTION_ENUMERATION_NAME not in result
assert S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME in result

def test_load_checks_to_execute_universal_framework_takes_precedence(self):
"""When ``--compliance <fw>`` matches a universal framework, the
loader must source checks from ``universal_frameworks[fw].requirements[*]
.checks[provider]`` and NOT fall through to ``bulk_compliance_frameworks``.

This is the path added by PR #10301 in checks_loader.py.
"""
from prowler.lib.check.compliance_models import (
ComplianceFramework,
UniversalComplianceRequirement,
)

bulk_checks_metadata = {
S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME: self.get_custom_check_s3_metadata()
}

universal_framework = ComplianceFramework(
framework="csa_ccm",
name="CSA CCM 4.0",
version="4.0",
description="Cloud Controls Matrix",
requirements=[
UniversalComplianceRequirement(
id="A&A-01",
description="Audit & Assurance",
attributes={},
checks={"aws": [S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME]},
),
],
)

with patch(
"prowler.lib.check.checks_loader.CheckMetadata.get_bulk",
return_value=bulk_checks_metadata,
):
result = load_checks_to_execute(
bulk_checks_metadata=bulk_checks_metadata,
bulk_compliance_frameworks={},  # legacy empty
compliance_frameworks=["csa_ccm_4.0"],
provider=self.provider,
universal_frameworks={"csa_ccm_4.0": universal_framework},
)

assert result == {S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME}

def test_load_checks_to_execute_universal_filters_by_provider(self):
"""A universal requirement may declare checks for several
providers; the loader must only return those for the active
provider key (lowercased)."""
from prowler.lib.check.compliance_models import (
ComplianceFramework,
UniversalComplianceRequirement,
)

bulk_checks_metadata = {
S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME: self.get_custom_check_s3_metadata()
}

# The same requirement maps a different check per provider.
# Only the AWS one must be returned for provider="aws".
universal_framework = ComplianceFramework(
framework="csa_ccm",
name="CSA CCM 4.0",
version="4.0",
description="Cloud Controls Matrix",
requirements=[
UniversalComplianceRequirement(
id="A&A-02",
description="Multi-provider req",
attributes={},
checks={
"aws": [S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME],
"azure": ["azure_only_check"],
"gcp": ["gcp_only_check"],
},
),
],
)

with patch(
"prowler.lib.check.checks_loader.CheckMetadata.get_bulk",
return_value=bulk_checks_metadata,
):
result = load_checks_to_execute(
bulk_checks_metadata=bulk_checks_metadata,
bulk_compliance_frameworks={},
compliance_frameworks=["csa_ccm_4.0"],
provider=self.provider,  # "aws"
universal_frameworks={"csa_ccm_4.0": universal_framework},
)

assert S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME in result
assert "azure_only_check" not in result
assert "gcp_only_check" not in result

def test_load_checks_to_execute_universal_no_match_falls_back_to_legacy(self):
"""If the requested compliance framework is not present in
``universal_frameworks``, the loader must fall back to the
legacy ``bulk_compliance_frameworks`` lookup."""
bulk_checks_metadata = {
S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME: self.get_custom_check_s3_metadata()
}
bulk_compliance_frameworks = {
"soc2_aws": Compliance(
Framework="SOC2",
Name="SOC2",
Provider="aws",
Version="2.0",
Description="x",
Requirements=[
Compliance_Requirement(
Checks=[S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME],
Id="",
Description="",
Attributes=[],
)
],
),
}

with patch(
"prowler.lib.check.checks_loader.CheckMetadata.get_bulk",
return_value=bulk_checks_metadata,
):
result = load_checks_to_execute(
bulk_checks_metadata=bulk_checks_metadata,
bulk_compliance_frameworks=bulk_compliance_frameworks,
compliance_frameworks=["soc2_aws"],
provider=self.provider,
universal_frameworks={"some_other_universal_fw": object()},
)

assert result == {S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME}

def test_load_checks_to_execute_universal_unknown_provider_returns_empty(self):
"""If the universal requirement has no checks for the active
provider, no checks are picked up for that requirement."""
from prowler.lib.check.compliance_models import (
ComplianceFramework,
UniversalComplianceRequirement,
)

bulk_checks_metadata = {
S3_BUCKET_LEVEL_PUBLIC_ACCESS_BLOCK_NAME: self.get_custom_check_s3_metadata()
}
universal_framework = ComplianceFramework(
framework="csa_ccm",
name="CSA CCM 4.0",
version="4.0",
description="Cloud Controls Matrix",
requirements=[
UniversalComplianceRequirement(
id="A&A-03",
description="Only Azure",
attributes={},
checks={"azure": ["azure_only_check"]},
),
],
)

with patch(
"prowler.lib.check.checks_loader.CheckMetadata.get_bulk",
return_value=bulk_checks_metadata,
):
result = load_checks_to_execute(
bulk_checks_metadata=bulk_checks_metadata,
bulk_compliance_frameworks={},
compliance_frameworks=["csa_ccm_4.0"],
|
||||
provider=self.provider, # "aws" — no checks declared
|
||||
universal_frameworks={"csa_ccm_4.0": universal_framework},
|
||||
)
|
||||
|
||||
assert result == set()
|
||||
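
A minimal sketch consolidating what the four universal-framework tests above assert about the loader. The helper name `_universal_checks_for` is hypothetical; it only illustrates the intended precedence, provider filtering and legacy fallback:

def _universal_checks_for(framework_name, universal_frameworks, provider_type):
    """Return the checks a universal framework declares for the active provider,
    or None so the caller falls back to the legacy bulk_compliance_frameworks path."""
    framework = universal_frameworks.get(framework_name)
    if framework is None:
        return None  # not universal: the legacy lookup applies
    checks = set()
    for requirement in framework.requirements:
        # Each requirement maps provider name (lowercased) -> list of check names;
        # only the active provider's entry contributes.
        checks.update(requirement.checks.get(provider_type.lower(), []))
    return checks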
|
||||
@@ -442,3 +442,123 @@ class TestComplianceOutput:
|
||||
)
|
||||
|
||||
assert compliance_output.file_extension == ".csv"
|
||||
|
||||
|
||||
class TestComplianceCheckHelperModule:
|
||||
"""Tests for the new ``compliance_check`` leaf module that hosts
|
||||
``get_check_compliance``.
|
||||
|
||||
This module exists to break the cyclic import chain
|
||||
``finding -> compliance.compliance -> universal.* -> finding`` that
|
||||
CodeQL flagged. It must be:
|
||||
- importable directly without pulling in the universal pipeline
|
||||
- re-exported by ``compliance.compliance`` for backward compatibility
|
||||
- the SAME function object, regardless of import path
|
||||
"""
|
||||
|
||||
def test_module_is_importable_directly(self):
|
||||
"""The helper module must be importable on its own — it is the
|
||||
leaf used by ``finding.py`` to break the cyclic import chain."""
|
||||
from prowler.lib.outputs.compliance import compliance_check
|
||||
|
||||
assert hasattr(compliance_check, "get_check_compliance")
|
||||
assert callable(compliance_check.get_check_compliance)
|
||||
|
||||
def test_helper_module_only_depends_on_check_models_and_logger(self):
|
||||
"""The helper must not pull in universal pipeline modules; that
|
||||
was the whole point of extracting it. Inspecting the module's
|
||||
own imports keeps it honest without polluting ``sys.modules``."""
|
||||
import inspect
|
||||
|
||||
from prowler.lib.outputs.compliance import compliance_check
|
||||
|
||||
source = inspect.getsource(compliance_check)
|
||||
# Only these two prowler imports are allowed in the leaf module
|
||||
assert "from prowler.lib.check.models import Check_Report" in source
|
||||
assert "from prowler.lib.logger import logger" in source
|
||||
# And NOT these (would re-introduce the cycle):
|
||||
assert "from prowler.lib.outputs.compliance.universal" not in source
|
||||
assert "from prowler.lib.outputs.finding" not in source
|
||||
assert "from prowler.lib.outputs.ocsf" not in source
|
||||
|
||||
def test_re_export_from_compliance_compliance(self):
|
||||
"""``compliance.compliance.get_check_compliance`` must point to
|
||||
the same function as ``compliance.compliance_check.get_check_compliance``."""
|
||||
from prowler.lib.outputs.compliance.compliance import (
|
||||
get_check_compliance as via_compliance,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.compliance_check import (
|
||||
get_check_compliance as via_helper,
|
||||
)
|
||||
|
||||
assert via_compliance is via_helper
|
||||
|
||||
def test_re_export_from_finding_module(self):
|
||||
"""``finding.get_check_compliance`` must point to the same
|
||||
function. Test mocks rely on this attribute existing on the
|
||||
``prowler.lib.outputs.finding`` module."""
|
||||
from prowler.lib.outputs.compliance.compliance_check import (
|
||||
get_check_compliance as via_helper,
|
||||
)
|
||||
from prowler.lib.outputs.finding import get_check_compliance as via_finding
|
||||
|
||||
assert via_finding is via_helper
|
||||
|
||||
def test_returns_empty_dict_on_unknown_check(self):
|
||||
"""Sanity test of the function logic via the helper module."""
|
||||
from prowler.lib.outputs.compliance.compliance_check import (
|
||||
get_check_compliance,
|
||||
)
|
||||
|
||||
finding = mock.MagicMock()
|
||||
finding.check_metadata.CheckID = "unknown_check_id"
|
||||
result = get_check_compliance(finding, "aws", {})
|
||||
assert result == {}
|
||||
|
||||
def test_filters_by_provider(self):
|
||||
"""The function returns frameworks only for the matching provider."""
|
||||
from prowler.lib.outputs.compliance.compliance_check import (
|
||||
get_check_compliance,
|
||||
)
|
||||
|
||||
compliance_aws = mock.MagicMock(
|
||||
Framework="CIS",
|
||||
Version="1.4",
|
||||
Provider="AWS",
|
||||
Requirements=[mock.MagicMock(Id="2.1.3")],
|
||||
)
|
||||
compliance_azure = mock.MagicMock(
|
||||
Framework="CIS",
|
||||
Version="2.0",
|
||||
Provider="Azure",
|
||||
Requirements=[mock.MagicMock(Id="9.1")],
|
||||
)
|
||||
finding = mock.MagicMock()
|
||||
finding.check_metadata.CheckID = "shared_check"
|
||||
bulk = {
|
||||
"shared_check": mock.MagicMock(
|
||||
Compliance=[compliance_aws, compliance_azure]
|
||||
)
|
||||
}
|
||||
|
||||
# Only AWS frameworks come back
|
||||
result = get_check_compliance(finding, "aws", bulk)
|
||||
assert "CIS-1.4" in result
|
||||
assert "CIS-2.0" not in result
|
||||
|
||||
def test_returns_empty_dict_on_exception(self):
|
||||
"""If iteration raises, the function logs the error and returns
|
||||
an empty dict (defensive behaviour)."""
|
||||
from prowler.lib.outputs.compliance.compliance_check import (
|
||||
get_check_compliance,
|
||||
)
|
||||
|
||||
# bulk_checks_metadata that raises when accessed → defensive path
|
||||
class Boom:
|
||||
def __contains__(self, _key):
|
||||
raise RuntimeError("boom")
|
||||
|
||||
finding = mock.MagicMock()
|
||||
finding.check_metadata.CheckID = "any"
|
||||
result = get_check_compliance(finding, "aws", Boom())
|
||||
assert result == {}
|
||||
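
Putting the assertions of this class together, a hedged re-implementation sketch of what ``get_check_compliance`` appears to do. The real function lives in ``prowler.lib.outputs.compliance.compliance_check``; this version is illustrative only:

def get_check_compliance_sketch(finding, provider, bulk_checks_metadata):
    # Build {"<Framework>-<Version>": [requirement ids...]} for the active provider,
    # returning {} on any unexpected error (the defensive behaviour tested above).
    try:
        check_id = finding.check_metadata.CheckID
        if check_id not in bulk_checks_metadata:
            return {}
        compliance = {}
        for framework in bulk_checks_metadata[check_id].Compliance:
            if framework.Provider.lower() != provider.lower():
                continue
            key = f"{framework.Framework}-{framework.Version}"
            compliance[key] = [requirement.Id for requirement in framework.Requirements]
        return compliance
    except Exception:
        return {}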
|
||||
@@ -0,0 +1,244 @@
|
||||
"""Tests for display_compliance_table dispatch logic.
|
||||
|
||||
Validates that each compliance framework name is routed to the correct
|
||||
table renderer via startswith matching, and that the universal early-return
|
||||
takes precedence when applicable.
|
||||
"""
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from prowler.lib.check.compliance_models import (
|
||||
ComplianceFramework,
|
||||
OutputsConfig,
|
||||
TableConfig,
|
||||
UniversalComplianceRequirement,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.compliance import display_compliance_table
|
||||
|
||||
MODULE = "prowler.lib.outputs.compliance.compliance"
|
||||
|
||||
# Common args shared by every call — the actual values don't matter
|
||||
# because we mock the downstream renderers.
|
||||
_COMMON = dict(
|
||||
findings=[],
|
||||
bulk_checks_metadata={},
|
||||
output_filename="out",
|
||||
output_directory="/tmp",
|
||||
compliance_overview=False,
|
||||
)
|
||||
|
||||
|
||||
# ── Dispatch to legacy table renderers ───────────────────────────────
|
||||
|
||||
|
||||
class TestDispatchStartswith:
|
||||
"""Each framework prefix must route to exactly one renderer."""
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"framework_name",
|
||||
[
|
||||
"cis_1.4_aws",
|
||||
"cis_2.0_azure",
|
||||
"cis_3.0_gcp",
|
||||
"cis_6.0_m365",
|
||||
"cis_1.10_kubernetes",
|
||||
],
|
||||
)
|
||||
@patch(f"{MODULE}.get_cis_table")
|
||||
def test_cis_dispatch(self, mock_fn, framework_name):
|
||||
display_compliance_table(compliance_framework=framework_name, **_COMMON)
|
||||
mock_fn.assert_called_once()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"framework_name",
|
||||
["ens_rd2022_aws", "ens_rd2022_azure", "ens_rd2022_gcp"],
|
||||
)
|
||||
@patch(f"{MODULE}.get_ens_table")
|
||||
def test_ens_dispatch(self, mock_fn, framework_name):
|
||||
display_compliance_table(compliance_framework=framework_name, **_COMMON)
|
||||
mock_fn.assert_called_once()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"framework_name",
|
||||
["mitre_attack_aws", "mitre_attack_azure", "mitre_attack_gcp"],
|
||||
)
|
||||
@patch(f"{MODULE}.get_mitre_attack_table")
|
||||
def test_mitre_dispatch(self, mock_fn, framework_name):
|
||||
display_compliance_table(compliance_framework=framework_name, **_COMMON)
|
||||
mock_fn.assert_called_once()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"framework_name",
|
||||
["kisa_isms_p_2023_aws", "kisa_isms_p_2023_korean_aws"],
|
||||
)
|
||||
@patch(f"{MODULE}.get_kisa_ismsp_table")
|
||||
def test_kisa_dispatch(self, mock_fn, framework_name):
|
||||
display_compliance_table(compliance_framework=framework_name, **_COMMON)
|
||||
mock_fn.assert_called_once()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"framework_name",
|
||||
[
|
||||
"prowler_threatscore_aws",
|
||||
"prowler_threatscore_azure",
|
||||
"prowler_threatscore_gcp",
|
||||
"prowler_threatscore_kubernetes",
|
||||
"prowler_threatscore_m365",
|
||||
"prowler_threatscore_alibabacloud",
|
||||
],
|
||||
)
|
||||
@patch(f"{MODULE}.get_prowler_threatscore_table")
|
||||
def test_threatscore_dispatch(self, mock_fn, framework_name):
|
||||
display_compliance_table(compliance_framework=framework_name, **_COMMON)
|
||||
mock_fn.assert_called_once()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"framework_name",
|
||||
[
|
||||
"csa_ccm_4.0_aws",
|
||||
"csa_ccm_4.0_azure",
|
||||
"csa_ccm_4.0_gcp",
|
||||
"csa_ccm_4.0_oraclecloud",
|
||||
"csa_ccm_4.0_alibabacloud",
|
||||
],
|
||||
)
|
||||
@patch(f"{MODULE}.get_csa_table")
|
||||
def test_csa_dispatch(self, mock_fn, framework_name):
|
||||
display_compliance_table(compliance_framework=framework_name, **_COMMON)
|
||||
mock_fn.assert_called_once()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"framework_name",
|
||||
["c5_aws", "c5_azure", "c5_gcp"],
|
||||
)
|
||||
@patch(f"{MODULE}.get_c5_table")
|
||||
def test_c5_dispatch(self, mock_fn, framework_name):
|
||||
display_compliance_table(compliance_framework=framework_name, **_COMMON)
|
||||
mock_fn.assert_called_once()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"framework_name",
|
||||
[
|
||||
"soc2_aws",
|
||||
"hipaa_aws",
|
||||
"gdpr_aws",
|
||||
"nist_800_53_revision_4_aws",
|
||||
"pci_3.2.1_aws",
|
||||
"iso27001_2013_aws",
|
||||
"aws_well_architected_framework_security_pillar_aws",
|
||||
"fedramp_low_revision_4_aws",
|
||||
"cisa_aws",
|
||||
],
|
||||
)
|
||||
@patch(f"{MODULE}.get_generic_compliance_table")
|
||||
def test_generic_dispatch(self, mock_fn, framework_name):
|
||||
display_compliance_table(compliance_framework=framework_name, **_COMMON)
|
||||
mock_fn.assert_called_once()
|
||||
|
||||
|
||||
# ── No false matches (the old `in` bug) ─────────────────────────────
|
||||
|
||||
|
||||
class TestNoFalseSubstringMatches:
|
||||
"""Frameworks that previously could false-match with `in` must NOT
|
||||
be routed to the wrong renderer now that we use startswith."""
|
||||
|
||||
@patch(f"{MODULE}.get_ens_table")
|
||||
@patch(f"{MODULE}.get_generic_compliance_table")
|
||||
def test_cisa_does_not_match_cis(self, mock_generic, mock_cis):
|
||||
"""'cisa_aws' must NOT match startswith('cis_')."""
|
||||
display_compliance_table(compliance_framework="cisa_aws", **_COMMON)
|
||||
mock_generic.assert_called_once()
|
||||
mock_cis.assert_not_called()
|
||||
|
||||
@patch(f"{MODULE}.get_prowler_threatscore_table")
|
||||
@patch(f"{MODULE}.get_generic_compliance_table")
|
||||
def test_threatscore_prefix_not_partial(self, mock_generic, mock_ts):
|
||||
"""A hypothetical 'threatscore_custom_aws' must NOT match
|
||||
startswith('prowler_threatscore_')."""
|
||||
display_compliance_table(
|
||||
compliance_framework="threatscore_custom_aws", **_COMMON
|
||||
)
|
||||
mock_generic.assert_called_once()
|
||||
mock_ts.assert_not_called()
|
||||
|
||||
@patch(f"{MODULE}.get_ens_table")
|
||||
@patch(f"{MODULE}.get_prowler_threatscore_table")
|
||||
def test_prowler_threatscore_does_not_match_ens(self, mock_ts, mock_ens):
|
||||
"""'prowler_threatscore_aws' must hit threatscore, never ens."""
|
||||
display_compliance_table(
|
||||
compliance_framework="prowler_threatscore_aws", **_COMMON
|
||||
)
|
||||
mock_ts.assert_called_once()
|
||||
mock_ens.assert_not_called()
|
||||
|
||||
|
||||
# ── Universal early-return ───────────────────────────────────────────
|
||||
|
||||
|
||||
class TestUniversalEarlyReturn:
|
||||
"""The universal path must take precedence over the elif chain."""
|
||||
|
||||
@staticmethod
|
||||
def _make_fw():
|
||||
return ComplianceFramework(
|
||||
framework="CIS",
|
||||
name="CIS",
|
||||
provider="AWS",
|
||||
version="5.0",
|
||||
description="d",
|
||||
requirements=[
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="d",
|
||||
attributes={},
|
||||
checks={"aws": ["check_a"]},
|
||||
),
|
||||
],
|
||||
outputs=OutputsConfig(table_config=TableConfig(group_by="_default")),
|
||||
)
|
||||
|
||||
@patch(f"{MODULE}.get_universal_table")
|
||||
@patch(f"{MODULE}.get_cis_table")
|
||||
def test_universal_takes_precedence_over_cis(self, mock_cis, mock_universal):
|
||||
"""A CIS framework in universal_frameworks with TableConfig must
|
||||
use the universal renderer, not get_cis_table."""
|
||||
fw = self._make_fw()
|
||||
display_compliance_table(
|
||||
compliance_framework="cis_5.0_aws",
|
||||
universal_frameworks={"cis_5.0_aws": fw},
|
||||
**_COMMON,
|
||||
)
|
||||
mock_universal.assert_called_once()
|
||||
mock_cis.assert_not_called()
|
||||
|
||||
@patch(f"{MODULE}.get_universal_table")
|
||||
@patch(f"{MODULE}.get_cis_table")
|
||||
def test_falls_through_without_table_config(self, mock_cis, mock_universal):
|
||||
"""If the universal framework has no TableConfig, fall through
|
||||
to the legacy elif chain."""
|
||||
fw = self._make_fw()
|
||||
fw.outputs = None
|
||||
display_compliance_table(
|
||||
compliance_framework="cis_5.0_aws",
|
||||
universal_frameworks={"cis_5.0_aws": fw},
|
||||
**_COMMON,
|
||||
)
|
||||
mock_cis.assert_called_once()
|
||||
mock_universal.assert_not_called()
|
||||
|
||||
@patch(f"{MODULE}.get_universal_table")
|
||||
@patch(f"{MODULE}.get_generic_compliance_table")
|
||||
def test_falls_through_when_not_in_universal_dict(
|
||||
self, mock_generic, mock_universal
|
||||
):
|
||||
"""If universal_frameworks is empty, fall through to legacy."""
|
||||
display_compliance_table(
|
||||
compliance_framework="soc2_aws",
|
||||
universal_frameworks={},
|
||||
**_COMMON,
|
||||
)
|
||||
mock_generic.assert_called_once()
|
||||
mock_universal.assert_not_called()
|
||||
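
A condensed sketch of the dispatch order these tests lock in: universal frameworks carrying a ``table_config`` short-circuit first, then prefix matching with ``startswith`` routes to the legacy renderers. Function and renderer names mirror the mocks; the body is illustrative, not the real code, and the real chain has more branches (mitre_attack_, kisa_isms_p_, csa_ccm_, c5_, ...):

def display_compliance_table_sketch(compliance_framework, universal_frameworks=None, **renderers):
    # 1) Universal early-return: a framework present in universal_frameworks and
    #    carrying outputs.table_config is rendered by the universal table.
    fw = (universal_frameworks or {}).get(compliance_framework)
    if fw is not None and fw.outputs is not None and fw.outputs.table_config is not None:
        return renderers["get_universal_table"](fw)
    # 2) Legacy elif chain, prefix-based so "cisa_aws" no longer false-matches "cis".
    if compliance_framework.startswith("cis_"):
        return renderers["get_cis_table"]()
    if compliance_framework.startswith("ens_"):
        return renderers["get_ens_table"]()
    if compliance_framework.startswith("prowler_threatscore_"):
        return renderers["get_prowler_threatscore_table"]()
    return renderers["get_generic_compliance_table"]()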
@@ -0,0 +1,128 @@
|
||||
from io import StringIO
|
||||
from unittest import mock
|
||||
|
||||
from freezegun import freeze_time
|
||||
from mock import patch
|
||||
|
||||
from prowler.lib.outputs.compliance.essential_eight.essential_eight_aws import (
|
||||
EssentialEightAWS,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.essential_eight.models import (
|
||||
EssentialEightAWSModel,
|
||||
)
|
||||
from tests.lib.outputs.compliance.fixtures import ESSENTIAL_EIGHT_AWS
|
||||
from tests.lib.outputs.fixtures.fixtures import generate_finding_output
|
||||
from tests.providers.aws.utils import AWS_ACCOUNT_NUMBER, AWS_REGION_EU_WEST_1
|
||||
|
||||
# The fixture's first Requirement maps clause "E8-1.8" (Patch applications,
# clause 8: removal of unsupported online services). The second Requirement,
# E8-6.1 (Restrict Office macros, clause 1), has no Checks and is therefore
# emitted as a manual row.
COMPLIANCE_NAME = "Essential-Eight-Nov 2023"
|
||||
|
||||
|
||||
class TestEssentialEightAWS:
|
||||
def test_output_transform(self):
|
||||
findings = [generate_finding_output(compliance={COMPLIANCE_NAME: "E8-1.8"})]
|
||||
|
||||
output = EssentialEightAWS(findings, ESSENTIAL_EIGHT_AWS)
|
||||
output_data = output.data[0]
|
||||
assert isinstance(output_data, EssentialEightAWSModel)
|
||||
assert output_data.Provider == "aws"
|
||||
assert output_data.Framework == ESSENTIAL_EIGHT_AWS.Framework
|
||||
assert output_data.Name == ESSENTIAL_EIGHT_AWS.Name
|
||||
assert output_data.Description == ESSENTIAL_EIGHT_AWS.Description
|
||||
assert output_data.AccountId == AWS_ACCOUNT_NUMBER
|
||||
assert output_data.Region == AWS_REGION_EU_WEST_1
|
||||
assert output_data.Requirements_Id == "E8-1.8"
|
||||
assert (
|
||||
output_data.Requirements_Description
|
||||
== ESSENTIAL_EIGHT_AWS.Requirements[0].Description
|
||||
)
|
||||
assert output_data.Requirements_Attributes_Section == "1 Patch applications"
|
||||
assert output_data.Requirements_Attributes_MaturityLevel == "ML1"
|
||||
assert output_data.Requirements_Attributes_AssessmentStatus == "Automated"
|
||||
assert output_data.Requirements_Attributes_CloudApplicability == "full"
|
||||
assert (
|
||||
output_data.Requirements_Attributes_MitigatedThreats
|
||||
== "Use of unsupported software, Long-tail vulnerability accumulation"
|
||||
)
|
||||
assert (
|
||||
output_data.Requirements_Attributes_Description
|
||||
== ESSENTIAL_EIGHT_AWS.Requirements[0].Attributes[0].Description
|
||||
)
|
||||
assert output_data.Status == "PASS"
|
||||
assert output_data.StatusExtended == ""
|
||||
assert output_data.ResourceId == ""
|
||||
assert output_data.ResourceName == ""
|
||||
assert output_data.CheckId == "service_test_check_id"
|
||||
assert not output_data.Muted
|
||||
|
||||
def test_manual_requirement(self):
|
||||
findings = [generate_finding_output(compliance={COMPLIANCE_NAME: "E8-1.8"})]
|
||||
output = EssentialEightAWS(findings, ESSENTIAL_EIGHT_AWS)
|
||||
|
||||
# E8-6.1 (macros) has no Checks -> emitted as a manual row, non-applicable
|
||||
manual_rows = [row for row in output.data if row.Status == "MANUAL"]
|
||||
assert len(manual_rows) == 1
|
||||
|
||||
manual = manual_rows[0]
|
||||
assert manual.Provider == "aws"
|
||||
assert manual.AccountId == ""
|
||||
assert manual.Region == ""
|
||||
assert manual.Requirements_Id == "E8-6.1"
|
||||
assert (
|
||||
manual.Requirements_Attributes_Section
|
||||
== "6 Restrict Microsoft Office macros"
|
||||
)
|
||||
assert manual.Requirements_Attributes_MaturityLevel == "ML1"
|
||||
assert manual.Requirements_Attributes_AssessmentStatus == "Manual"
|
||||
assert manual.Requirements_Attributes_CloudApplicability == "non-applicable"
|
||||
assert (
|
||||
manual.Requirements_Attributes_MitigatedThreats
|
||||
== "Macro-based malware delivery"
|
||||
)
|
||||
assert manual.StatusExtended == "Manual check"
|
||||
assert manual.ResourceId == "manual_check"
|
||||
assert manual.ResourceName == "Manual check"
|
||||
assert manual.CheckId == "manual"
|
||||
assert not manual.Muted
|
||||
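
For reference, a hedged sketch (a plain dict rather than the real EssentialEightAWSModel) of the row the writer appears to emit when a requirement declares no Checks, using only values asserted in the test above:

def _manual_row_sketch(requirement, attribute, provider="aws"):
    # Illustrative only: the real output class builds a Pydantic model row.
    return {
        "Provider": provider,
        "AccountId": "",
        "Region": "",
        "Requirements_Id": requirement.Id,
        "Requirements_Attributes_Section": attribute.Section,
        "Requirements_Attributes_MaturityLevel": attribute.MaturityLevel,
        "Requirements_Attributes_AssessmentStatus": attribute.AssessmentStatus,
        "Requirements_Attributes_CloudApplicability": attribute.CloudApplicability,
        "Status": "MANUAL",
        "StatusExtended": "Manual check",
        "ResourceId": "manual_check",
        "ResourceName": "Manual check",
        "CheckId": "manual",
        "Muted": False,
    }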
|
||||
@freeze_time("2025-01-01 00:00:00")
|
||||
@mock.patch(
|
||||
"prowler.lib.outputs.compliance.essential_eight.essential_eight_aws.timestamp",
|
||||
"2025-01-01 00:00:00",
|
||||
)
|
||||
def test_batch_write_data_to_file(self):
|
||||
mock_file = StringIO()
|
||||
findings = [generate_finding_output(compliance={COMPLIANCE_NAME: "E8-1.8"})]
|
||||
output = EssentialEightAWS(findings, ESSENTIAL_EIGHT_AWS)
|
||||
output._file_descriptor = mock_file
|
||||
|
||||
with patch.object(mock_file, "close", return_value=None):
|
||||
output.batch_write_data_to_file()
|
||||
|
||||
mock_file.seek(0)
|
||||
content = mock_file.read()
|
||||
|
||||
# Validate header carries the E8-specific column names
|
||||
first_line = content.split("\r\n", 1)[0]
|
||||
for column in (
|
||||
"REQUIREMENTS_ATTRIBUTES_MATURITYLEVEL",
|
||||
"REQUIREMENTS_ATTRIBUTES_ASSESSMENTSTATUS",
|
||||
"REQUIREMENTS_ATTRIBUTES_CLOUDAPPLICABILITY",
|
||||
"REQUIREMENTS_ATTRIBUTES_MITIGATEDTHREATS",
|
||||
"REQUIREMENTS_ATTRIBUTES_RATIONALESTATEMENT",
|
||||
"REQUIREMENTS_ATTRIBUTES_REMEDIATIONPROCEDURE",
|
||||
"REQUIREMENTS_ATTRIBUTES_AUDITPROCEDURE",
|
||||
):
|
||||
assert column in first_line, f"missing column {column} in CSV header"
|
||||
|
||||
# rows: header + matched + manual
|
||||
rows = [r for r in content.split("\r\n") if r]
|
||||
assert len(rows) == 3
|
||||
assert rows[1].split(";")[0] == "aws"
|
||||
assert "ML1" in rows[1]
|
||||
assert ";PASS;" in rows[1]
|
||||
assert ";MANUAL;" in rows[2]
|
||||
assert ";manual_check;" in rows[2]
|
||||
@@ -7,6 +7,7 @@ from prowler.lib.check.compliance_models import (
|
||||
ENS_Requirement_Attribute,
|
||||
ENS_Requirement_Attribute_Nivel,
|
||||
ENS_Requirement_Attribute_Tipos,
|
||||
EssentialEight_Requirement_Attribute,
|
||||
Generic_Compliance_Requirement_Attribute,
|
||||
ISO27001_2013_Requirement_Attribute,
|
||||
KISA_ISMSP_Requirement_Attribute,
|
||||
@@ -1189,3 +1190,58 @@ CCC_GCP_FIXTURE = Compliance(
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
ESSENTIAL_EIGHT_AWS = Compliance(
|
||||
Framework="Essential-Eight",
|
||||
Name="ASD Essential Eight Maturity Model - Maturity Level One (AWS)",
|
||||
Version="Nov 2023",
|
||||
Provider="AWS",
|
||||
Description="Literal mapping of the Australian Signals Directorate (ASD) Essential Eight Maturity Model ML1 to AWS infrastructure checks.",
|
||||
Requirements=[
|
||||
Compliance_Requirement(
|
||||
Id="E8-1.8",
|
||||
Description="Online services that are no longer supported by vendors are removed.",
|
||||
Attributes=[
|
||||
EssentialEight_Requirement_Attribute(
|
||||
Section="1 Patch applications",
|
||||
MaturityLevel="ML1",
|
||||
AssessmentStatus="Automated",
|
||||
CloudApplicability="full",
|
||||
MitigatedThreats=[
|
||||
"Use of unsupported software",
|
||||
"Long-tail vulnerability accumulation",
|
||||
],
|
||||
Description="Detect and remove unsupported AWS-hosted online services (Lambda runtimes, RDS engines, EKS, Fargate, Kafka, OpenSearch).",
|
||||
RationaleStatement="Unsupported services no longer receive security patches.",
|
||||
ImpactStatement="",
|
||||
RemediationProcedure="Migrate Lambda off deprecated runtimes; remove RDS Extended Support; upgrade EKS.",
|
||||
AuditProcedure="Run all listed checks.",
|
||||
AdditionalInformation="ASD Essential Eight ML1 - Patch applications - clause 8.",
|
||||
References="https://www.cyber.gov.au/resources-business-and-government/essential-cyber-security/essential-eight/essential-eight-maturity-model",
|
||||
)
|
||||
],
|
||||
Checks=["service_test_check_id"],
|
||||
),
|
||||
Compliance_Requirement(
|
||||
Id="E8-6.1",
|
||||
Description="Microsoft Office macros are disabled for users that do not have a demonstrated business requirement.",
|
||||
Attributes=[
|
||||
EssentialEight_Requirement_Attribute(
|
||||
Section="6 Restrict Microsoft Office macros",
|
||||
MaturityLevel="ML1",
|
||||
AssessmentStatus="Manual",
|
||||
CloudApplicability="non-applicable",
|
||||
MitigatedThreats=["Macro-based malware delivery"],
|
||||
Description="Endpoint / Microsoft 365 control. Out of AWS infrastructure scope.",
|
||||
RationaleStatement="Most users never need Office macros.",
|
||||
ImpactStatement="",
|
||||
RemediationProcedure="Disable macros via Group Policy / Intune / M365 admin policies.",
|
||||
AuditProcedure="Manual review of M365 macro policy.",
|
||||
AdditionalInformation="ASD Essential Eight ML1 - Restrict Microsoft Office macros - clause 1. Out of AWS infrastructure scope.",
|
||||
References="https://www.cyber.gov.au/resources-business-and-government/essential-cyber-security/essential-eight/essential-eight-maturity-model",
|
||||
)
|
||||
],
|
||||
Checks=[],
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -0,0 +1,730 @@
|
||||
"""Tests for process_universal_compliance_frameworks and --list-compliance fixes.
|
||||
|
||||
Validates that the pre-processing step:
|
||||
- generates both CSV and OCSF outputs for universal frameworks
|
||||
- always generates OCSF (no output-format gate)
|
||||
- skips frameworks without outputs or table_config
|
||||
- skips frameworks not in universal_frameworks
|
||||
- returns the set of processed names for removal from the legacy loop
|
||||
- works across different providers
|
||||
|
||||
Also validates that print_compliance_frameworks and print_compliance_requirements
|
||||
work with universal ComplianceFramework objects (dict checks, None provider).
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime, timezone
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
|
||||
from prowler.lib.check.check import (
|
||||
print_compliance_frameworks,
|
||||
print_compliance_requirements,
|
||||
)
|
||||
from prowler.lib.check.compliance_models import (
|
||||
AttributeMetadata,
|
||||
ComplianceFramework,
|
||||
OutputsConfig,
|
||||
TableConfig,
|
||||
UniversalComplianceRequirement,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.compliance import (
|
||||
process_universal_compliance_frameworks,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.universal.ocsf_compliance import (
|
||||
OCSFComplianceOutput,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.universal.universal_output import (
|
||||
UniversalComplianceOutput,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _create_compliance_dir(tmp_path):
|
||||
"""Ensure the compliance/ subdirectory exists before each test."""
|
||||
os.makedirs(tmp_path / "compliance", exist_ok=True)
|
||||
|
||||
|
||||
# ── Helpers ──────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _make_finding(check_id, status="PASS", provider="aws"):
|
||||
"""Create a mock Finding with all fields needed by both output classes."""
|
||||
finding = SimpleNamespace()
|
||||
finding.provider = provider
|
||||
finding.account_uid = "123456789012"
|
||||
finding.account_name = "test-account"
|
||||
finding.account_email = ""
|
||||
finding.account_organization_uid = "org-123"
|
||||
finding.account_organization_name = "test-org"
|
||||
finding.account_tags = {"env": "test"}
|
||||
finding.region = "us-east-1"
|
||||
finding.status = status
|
||||
finding.status_extended = f"{check_id} is {status}"
|
||||
finding.resource_uid = f"arn:aws:iam::123456789012:{check_id}"
|
||||
finding.resource_name = check_id
|
||||
finding.resource_details = "some details"
|
||||
finding.resource_metadata = {}
|
||||
finding.resource_tags = {"Name": "test"}
|
||||
finding.partition = "aws"
|
||||
finding.muted = False
|
||||
finding.check_id = check_id
|
||||
finding.uid = "test-finding-uid"
|
||||
finding.timestamp = datetime(2025, 1, 15, 12, 0, 0, tzinfo=timezone.utc)
|
||||
finding.prowler_version = "5.0.0"
|
||||
finding.compliance = {"TestFW-1.0": ["1.1"]}
|
||||
finding.metadata = SimpleNamespace(
|
||||
Provider=provider,
|
||||
CheckID=check_id,
|
||||
CheckTitle=f"Title for {check_id}",
|
||||
CheckType=["test-type"],
|
||||
Description=f"Description for {check_id}",
|
||||
Severity="medium",
|
||||
ServiceName="iam",
|
||||
ResourceType="aws-iam-role",
|
||||
Risk="test-risk",
|
||||
RelatedUrl="https://example.com",
|
||||
Remediation=SimpleNamespace(
|
||||
Recommendation=SimpleNamespace(Text="Fix it", Url="https://fix.com"),
|
||||
),
|
||||
DependsOn=[],
|
||||
RelatedTo=[],
|
||||
Categories=["test"],
|
||||
Notes="",
|
||||
AdditionalURLs=[],
|
||||
)
|
||||
return finding
|
||||
|
||||
|
||||
def _make_universal_framework(name="TestFW", version="1.0", with_table_config=True):
|
||||
"""Build a ComplianceFramework with optional table_config."""
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="Test requirement",
|
||||
attributes={"Section": "IAM"},
|
||||
checks={"aws": ["check_a"]},
|
||||
),
|
||||
]
|
||||
metadata = [AttributeMetadata(key="Section", type="str")]
|
||||
outputs = None
|
||||
if with_table_config:
|
||||
outputs = OutputsConfig(table_config=TableConfig(group_by="Section"))
|
||||
return ComplianceFramework(
|
||||
framework=name,
|
||||
name=f"{name} Framework",
|
||||
provider="AWS",
|
||||
version=version,
|
||||
description="Test framework",
|
||||
requirements=reqs,
|
||||
attributes_metadata=metadata,
|
||||
outputs=outputs,
|
||||
)
|
||||
|
||||
|
||||
# ── Tests ────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestProcessUniversalComplianceFrameworks:
|
||||
"""Core tests for the extracted pre-processing function."""
|
||||
|
||||
def test_generates_csv_and_ocsf_outputs(self, tmp_path):
|
||||
"""Both CSV and OCSF outputs are appended to generated_outputs."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_output",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == {"test_fw_1.0"}
|
||||
assert len(generated["compliance"]) == 2
|
||||
assert isinstance(generated["compliance"][0], UniversalComplianceOutput)
|
||||
assert isinstance(generated["compliance"][1], OCSFComplianceOutput)
|
||||
|
||||
def test_ocsf_always_generated_no_format_gate(self, tmp_path):
|
||||
"""OCSF output is generated regardless of output_formats — no gate."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_output",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
ocsf_outputs = [
|
||||
o for o in generated["compliance"] if isinstance(o, OCSFComplianceOutput)
|
||||
]
|
||||
assert len(ocsf_outputs) == 1
|
||||
|
||||
def test_csv_file_written(self, tmp_path):
|
||||
"""CSV file is created with expected content."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_output",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
csv_path = tmp_path / "compliance" / "prowler_output_test_fw_1.0.csv"
|
||||
assert csv_path.exists()
|
||||
content = csv_path.read_text()
|
||||
assert "PROVIDER" in content
|
||||
assert "REQUIREMENTS_ATTRIBUTES_SECTION" in content
|
||||
|
||||
def test_ocsf_file_written(self, tmp_path):
|
||||
"""OCSF JSON file is created with valid content."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_output",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
ocsf_path = tmp_path / "compliance" / "prowler_output_test_fw_1.0.ocsf.json"
|
||||
assert ocsf_path.exists()
|
||||
data = json.loads(ocsf_path.read_text())
|
||||
assert isinstance(data, list)
|
||||
assert len(data) >= 1
|
||||
assert data[0]["class_uid"] == 2003
|
||||
|
||||
def test_returns_processed_names(self, tmp_path):
|
||||
"""Returns the set of framework names that were processed."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0", "legacy_fw"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == {"test_fw_1.0"}
|
||||
assert "legacy_fw" not in processed
|
||||
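
A self-contained sketch of the contract pinned down by this class and by TestSkipConditions below. Writer construction is reduced to SimpleNamespace stand-ins because the real UniversalComplianceOutput/OCSFComplianceOutput constructors are not exercised here, so everything beyond the skip rules and the return value is an assumption:

from types import SimpleNamespace


def process_universal_frameworks_sketch(
    input_compliance_frameworks,
    universal_frameworks,
    output_directory,
    output_filename,
    generated_outputs,
):
    processed = set()
    for name in input_compliance_frameworks:
        framework = universal_frameworks.get(name)
        if framework is None:
            continue  # not universal: left for the legacy loop
        if framework.outputs is None or framework.outputs.table_config is None:
            continue  # nothing to render for this framework
        base = f"{output_directory}/compliance/{output_filename}_{name}"
        # Stand-ins for the CSV and OCSF writers appended by the real function.
        generated_outputs["compliance"].append(SimpleNamespace(file_path=f"{base}.csv"))
        generated_outputs["compliance"].append(
            SimpleNamespace(file_path=f"{base}.ocsf.json")
        )
        processed.add(name)
    return processed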
|
||||
|
||||
class TestSkipConditions:
|
||||
"""Tests for frameworks that should NOT be processed."""
|
||||
|
||||
def test_skips_framework_not_in_universal(self, tmp_path):
|
||||
"""Frameworks not in universal_frameworks dict are skipped."""
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"cis_aws_1.4"},
|
||||
universal_frameworks={},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == set()
|
||||
assert len(generated["compliance"]) == 0
|
||||
|
||||
def test_skips_framework_without_outputs(self, tmp_path):
|
||||
"""Frameworks with outputs=None are skipped."""
|
||||
fw = _make_universal_framework(with_table_config=False)
|
||||
# outputs is None since with_table_config=False
|
||||
assert fw.outputs is None
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == set()
|
||||
assert len(generated["compliance"]) == 0
|
||||
|
||||
def test_skips_framework_with_outputs_but_no_table_config(self, tmp_path):
|
||||
"""Frameworks with outputs but table_config=None are skipped."""
|
||||
fw = _make_universal_framework()
|
||||
# Manually set table_config to None while keeping outputs
|
||||
fw.outputs = OutputsConfig(table_config=None)
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == set()
|
||||
assert len(generated["compliance"]) == 0
|
||||
|
||||
def test_empty_input_frameworks(self, tmp_path):
|
||||
"""No processing when input set is empty."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks=set(),
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == set()
|
||||
assert len(generated["compliance"]) == 0
|
||||
|
||||
|
||||
class TestMixedFrameworks:
|
||||
"""Tests with a mix of universal and legacy frameworks."""
|
||||
|
||||
def test_only_universal_processed_legacy_untouched(self, tmp_path):
|
||||
"""Only universal frameworks are processed; legacy names are not returned."""
|
||||
universal_fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
all_frameworks = {"test_fw_1.0", "cis_aws_1.4", "nist_800_53_aws"}
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks=all_frameworks,
|
||||
universal_frameworks={"test_fw_1.0": universal_fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == {"test_fw_1.0"}
|
||||
# 2 outputs for the one universal framework (CSV + OCSF)
|
||||
assert len(generated["compliance"]) == 2
|
||||
|
||||
def test_removal_from_input_set(self, tmp_path):
|
||||
"""Caller can subtract processed set from input to get legacy-only frameworks."""
|
||||
universal_fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
input_frameworks = {"test_fw_1.0", "cis_aws_1.4", "nist_800_53_aws"}
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks=input_frameworks,
|
||||
universal_frameworks={"test_fw_1.0": universal_fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
remaining = input_frameworks - processed
|
||||
assert remaining == {"cis_aws_1.4", "nist_800_53_aws"}
|
||||
|
||||
def test_multiple_universal_frameworks(self, tmp_path):
|
||||
"""Multiple universal frameworks each get CSV + OCSF."""
|
||||
fw1 = _make_universal_framework(name="FW1", version="1.0")
|
||||
fw2 = _make_universal_framework(name="FW2", version="2.0")
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"fw1_1.0", "fw2_2.0", "legacy"},
|
||||
universal_frameworks={"fw1_1.0": fw1, "fw2_2.0": fw2},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == {"fw1_1.0", "fw2_2.0"}
|
||||
# 2 frameworks × 2 outputs each = 4
|
||||
assert len(generated["compliance"]) == 4
|
||||
csv_outputs = [
|
||||
o
|
||||
for o in generated["compliance"]
|
||||
if isinstance(o, UniversalComplianceOutput)
|
||||
]
|
||||
ocsf_outputs = [
|
||||
o for o in generated["compliance"] if isinstance(o, OCSFComplianceOutput)
|
||||
]
|
||||
assert len(csv_outputs) == 2
|
||||
assert len(ocsf_outputs) == 2
|
||||
|
||||
|
||||
class TestProviderVariants:
|
||||
"""Verify the function works for different providers."""
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"provider",
|
||||
[
|
||||
"aws",
|
||||
"azure",
|
||||
"gcp",
|
||||
"kubernetes",
|
||||
"m365",
|
||||
"github",
|
||||
"oraclecloud",
|
||||
"alibabacloud",
|
||||
"nhn",
|
||||
],
|
||||
)
|
||||
def test_all_providers_produce_outputs(self, tmp_path, provider):
|
||||
"""Each provider generates CSV + OCSF when given a universal framework."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a", provider=provider)],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider=provider,
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == {"test_fw_1.0"}
|
||||
assert len(generated["compliance"]) == 2
|
||||
assert isinstance(generated["compliance"][0], UniversalComplianceOutput)
|
||||
assert isinstance(generated["compliance"][1], OCSFComplianceOutput)
|
||||
|
||||
|
||||
class TestEmptyFindings:
|
||||
"""Test behavior when there are no findings."""
|
||||
|
||||
def test_still_processed_with_empty_findings(self, tmp_path):
|
||||
"""Framework is still marked as processed even with no findings."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
processed = process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
assert processed == {"test_fw_1.0"}
|
||||
# Outputs are still appended (they'll just have empty data)
|
||||
assert len(generated["compliance"]) == 2
|
||||
|
||||
|
||||
class TestFilePaths:
|
||||
"""Verify correct file path construction."""
|
||||
|
||||
def test_csv_path_format(self, tmp_path):
|
||||
"""CSV output has the correct file path."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"csa_ccm_4.0"},
|
||||
universal_frameworks={"csa_ccm_4.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_report",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
csv_output = generated["compliance"][0]
|
||||
assert csv_output.file_path == (
|
||||
f"{tmp_path}/compliance/prowler_report_csa_ccm_4.0.csv"
|
||||
)
|
||||
|
||||
def test_ocsf_path_format(self, tmp_path):
|
||||
"""OCSF output has the correct file path."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
|
||||
process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"csa_ccm_4.0"},
|
||||
universal_frameworks={"csa_ccm_4.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_report",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
ocsf_output = generated["compliance"][1]
|
||||
assert ocsf_output.file_path == (
|
||||
f"{tmp_path}/compliance/prowler_report_csa_ccm_4.0.ocsf.json"
|
||||
)
|
||||
|
||||
|
||||
# ── Tests for --list-compliance fix ──────────────────────────────────
|
||||
|
||||
|
||||
def _make_legacy_compliance():
|
||||
"""Create a mock legacy Compliance-like object with the expected attributes."""
|
||||
return SimpleNamespace(
|
||||
Framework="CIS",
|
||||
Provider="AWS",
|
||||
Version="1.4",
|
||||
Requirements=[
|
||||
SimpleNamespace(
|
||||
Id="2.1.3",
|
||||
Description="Ensure MFA Delete is enabled",
|
||||
Checks=["s3_bucket_mfa_delete"],
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
class TestPrintComplianceFrameworks:
|
||||
"""Tests for print_compliance_frameworks with universal frameworks."""
|
||||
|
||||
def test_includes_universal_frameworks(self, capsys):
|
||||
"""Universal frameworks appear in the listing."""
|
||||
legacy = {"cis_1.4_aws": _make_legacy_compliance()}
|
||||
universal = {"csa_ccm_4.0": _make_universal_framework()}
|
||||
merged = {**legacy, **universal}
|
||||
|
||||
print_compliance_frameworks(merged)
|
||||
captured = capsys.readouterr().out
|
||||
|
||||
assert "cis_1.4_aws" in captured
|
||||
assert "csa_ccm_4.0" in captured
|
||||
|
||||
def test_count_includes_both(self, capsys):
|
||||
"""Framework count includes both legacy and universal."""
|
||||
legacy = {"cis_1.4_aws": _make_legacy_compliance()}
|
||||
universal = {"csa_ccm_4.0": _make_universal_framework()}
|
||||
merged = {**legacy, **universal}
|
||||
|
||||
print_compliance_frameworks(merged)
|
||||
captured = capsys.readouterr().out
|
||||
|
||||
assert "2" in captured
|
||||
|
||||
def test_universal_only(self, capsys):
|
||||
"""Works when only universal frameworks are present."""
|
||||
universal = {"csa_ccm_4.0": _make_universal_framework()}
|
||||
|
||||
print_compliance_frameworks(universal)
|
||||
captured = capsys.readouterr().out
|
||||
|
||||
assert "csa_ccm_4.0" in captured
|
||||
assert "1" in captured
|
||||
|
||||
|
||||
class TestPrintComplianceRequirements:
|
||||
"""Tests for print_compliance_requirements with universal frameworks."""
|
||||
|
||||
def test_list_checks_universal_framework(self, capsys):
|
||||
"""Requirements with dict checks are printed correctly."""
|
||||
fw = _make_universal_framework()
|
||||
all_fw = {"test_fw_1.0": fw}
|
||||
|
||||
print_compliance_requirements(all_fw, ["test_fw_1.0"])
|
||||
captured = capsys.readouterr().out
|
||||
|
||||
assert "1.1" in captured
|
||||
assert "check_a" in captured
|
||||
|
||||
def test_dict_checks_universal_framework(self, capsys):
|
||||
"""Requirements with dict checks show provider-prefixed checks."""
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="A&A-01",
|
||||
description="Audit & Assurance",
|
||||
attributes={"Section": "A&A"},
|
||||
checks={"aws": ["check_a", "check_b"], "azure": ["check_c"]},
|
||||
),
|
||||
]
|
||||
fw = ComplianceFramework(
|
||||
framework="CSA_CCM",
|
||||
name="CSA CCM 4.0",
|
||||
version="4.0",
|
||||
description="Cloud Controls Matrix",
|
||||
requirements=reqs,
|
||||
)
|
||||
all_fw = {"csa_ccm_4.0": fw}
|
||||
|
||||
print_compliance_requirements(all_fw, ["csa_ccm_4.0"])
|
||||
captured = capsys.readouterr().out
|
||||
|
||||
assert "A&A-01" in captured
|
||||
assert "[aws] check_a" in captured
|
||||
assert "[aws] check_b" in captured
|
||||
assert "[azure] check_c" in captured
|
||||
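
A small sketch of the provider-prefixed rendering these tests check; the helper name is hypothetical:

def _render_universal_requirement_checks_sketch(requirement):
    # Universal requirements store checks as {provider: [check, ...]}; the
    # listing prefixes each check with its provider, e.g. "[aws] check_a".
    return [
        f"[{provider}] {check}"
        for provider, checks in requirement.checks.items()
        for check in checks
    ]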
|
||||
def test_none_provider_shows_multi_provider(self, capsys):
|
||||
"""Frameworks with provider=None show 'Multi-provider'."""
|
||||
fw = ComplianceFramework(
|
||||
framework="CSA_CCM",
|
||||
name="CSA CCM 4.0",
|
||||
version="4.0",
|
||||
description="Cloud Controls Matrix",
|
||||
requirements=[
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={},
|
||||
checks={"aws": ["check_a"]},
|
||||
),
|
||||
],
|
||||
)
|
||||
all_fw = {"csa_ccm_4.0": fw}
|
||||
|
||||
print_compliance_requirements(all_fw, ["csa_ccm_4.0"])
|
||||
captured = capsys.readouterr().out
|
||||
|
||||
assert "Multi-provider" in captured
|
||||
|
||||
|
||||
# ── Idempotency tests ────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestIdempotency:
|
||||
"""The function must be safe to invoke multiple times for the same
|
||||
framework. Repeated calls must reuse writers tracked in
|
||||
``generated_outputs["compliance"]`` instead of recreating them.
|
||||
|
||||
This guards against:
|
||||
- duplicate writer entries in generated_outputs (regular pipeline
|
||||
treats one writer per framework)
|
||||
- the OCSF append-bug where a second writer would emit
|
||||
``[...]<new>...]`` and break the JSON array.
|
||||
"""
|
||||
|
||||
def test_second_call_does_not_duplicate_writers(self, tmp_path):
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
kwargs = dict(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_output",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
first = process_universal_compliance_frameworks(**kwargs)
|
||||
first_count = len(generated["compliance"])
|
||||
second = process_universal_compliance_frameworks(**kwargs)
|
||||
second_count = len(generated["compliance"])
|
||||
|
||||
assert first == {"test_fw_1.0"}
|
||||
assert second == {"test_fw_1.0"} # still reported as processed
|
||||
assert first_count == 2 # CSV + OCSF
|
||||
assert second_count == 2 # NO duplication
|
||||
|
||||
def test_second_call_keeps_ocsf_json_valid(self, tmp_path):
|
||||
"""End-to-end: after two calls the OCSF JSON file must still be
|
||||
a single, valid JSON array — not the broken ``[...]...]`` form."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
kwargs = dict(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_output",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
process_universal_compliance_frameworks(**kwargs)
|
||||
process_universal_compliance_frameworks(**kwargs)
|
||||
|
||||
ocsf_path = tmp_path / "compliance" / "prowler_output_test_fw_1.0.ocsf.json"
|
||||
data = json.loads(ocsf_path.read_text()) # Will raise on invalid JSON
|
||||
assert isinstance(data, list)
|
||||
assert len(data) >= 1
|
||||
|
||||
def test_reuses_existing_writer_object(self, tmp_path):
|
||||
"""The CSV/OCSF writer instances appended on first call must be
|
||||
the SAME objects after a second call — not fresh ones."""
|
||||
fw = _make_universal_framework()
|
||||
generated = {"compliance": []}
|
||||
kwargs = dict(
|
||||
input_compliance_frameworks={"test_fw_1.0"},
|
||||
universal_frameworks={"test_fw_1.0": fw},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="prowler_output",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
|
||||
process_universal_compliance_frameworks(**kwargs)
|
||||
first_writers = list(generated["compliance"])
|
||||
process_universal_compliance_frameworks(**kwargs)
|
||||
second_writers = list(generated["compliance"])
|
||||
|
||||
# Same identity, same length — reused, not recreated.
|
||||
assert len(first_writers) == len(second_writers)
|
||||
for a, b in zip(first_writers, second_writers):
|
||||
assert a is b
|
||||
|
||||
def test_idempotency_across_mixed_frameworks(self, tmp_path):
|
||||
"""When the second call adds a new framework, the new one is
|
||||
created while existing ones are NOT recreated."""
|
||||
fw1 = _make_universal_framework(name="FW1", version="1.0")
|
||||
fw2 = _make_universal_framework(name="FW2", version="2.0")
|
||||
generated = {"compliance": []}
|
||||
|
||||
# First call: only FW1
|
||||
process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"fw1_1.0"},
|
||||
universal_frameworks={"fw1_1.0": fw1, "fw2_2.0": fw2},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
first_writers = list(generated["compliance"])
|
||||
assert len(first_writers) == 2
|
||||
|
||||
# Second call: includes both. FW1 must be reused, FW2 created fresh.
|
||||
process_universal_compliance_frameworks(
|
||||
input_compliance_frameworks={"fw1_1.0", "fw2_2.0"},
|
||||
universal_frameworks={"fw1_1.0": fw1, "fw2_2.0": fw2},
|
||||
finding_outputs=[_make_finding("check_a")],
|
||||
output_directory=str(tmp_path),
|
||||
output_filename="out",
|
||||
provider="aws",
|
||||
generated_outputs=generated,
|
||||
)
|
||||
second_writers = list(generated["compliance"])
|
||||
assert len(second_writers) == 4 # 2 (FW1 reused) + 2 new (FW2)
|
||||
# FW1 writer instances unchanged
|
||||
assert second_writers[0] is first_writers[0]
|
||||
assert second_writers[1] is first_writers[1]
|
||||
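
A hedged sketch of the reuse lookup these tests imply: find a previously appended writer by its ``file_path`` instead of creating and appending a new one.

def _existing_writer_sketch(generated_outputs, file_path):
    # Reusing the original writer keeps generated_outputs free of duplicates and
    # avoids re-opening the OCSF JSON array, which is what broke it before.
    for writer in generated_outputs["compliance"]:
        if getattr(writer, "file_path", None) == file_path:
            return writer
    return None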
@@ -2,6 +2,7 @@ import json
|
||||
from datetime import datetime, timezone
|
||||
from types import SimpleNamespace
|
||||
|
||||
from py_ocsf_models.events.base_event import StatusID as EventStatusID
|
||||
from py_ocsf_models.events.findings.compliance_finding import ComplianceFinding
|
||||
from py_ocsf_models.events.findings.compliance_finding_type_id import (
|
||||
ComplianceFindingTypeID,
|
||||
@@ -18,6 +19,7 @@ from prowler.lib.check.compliance_models import (
|
||||
)
|
||||
from prowler.lib.outputs.compliance.universal.ocsf_compliance import (
|
||||
OCSFComplianceOutput,
|
||||
_sanitize_resource_data,
|
||||
)
|
||||
|
||||
|
||||
@@ -473,3 +475,159 @@ class TestOCSFComplianceOutput:
|
||||
cf = output.data[0]
|
||||
assert cf.unmapped["requirement_attributes"]["section"] == "Logging"
|
||||
assert "internal_note" not in cf.unmapped["requirement_attributes"]
|
||||
|
||||
|
||||
class TestSanitizeResourceData:
|
||||
"""Unit tests for the _sanitize_resource_data helper.
|
||||
|
||||
Service resources may carry non-JSON-serializable objects (e.g. raw
|
||||
Pydantic models such as ``Trail`` or ``LifecyclePolicy``). The helper
|
||||
must convert them so the resulting ComplianceFinding can be serialized.
|
||||
"""
|
||||
|
||||
def test_dict_passthrough(self):
|
||||
result = _sanitize_resource_data("details", {"a": 1, "b": "two"})
|
||||
assert result == {"details": "details", "metadata": {"a": 1, "b": "two"}}
|
||||
|
||||
def test_none_metadata(self):
|
||||
result = _sanitize_resource_data("details", None)
|
||||
assert result == {"details": "details", "metadata": None}
|
||||
|
||||
def test_pydantic_v2_model_dump(self):
|
||||
class FakeV2Model:
|
||||
def model_dump(self):
|
||||
return {"name": "trail-1", "region": "us-east-1"}
|
||||
|
||||
result = _sanitize_resource_data("d", {"trail": FakeV2Model()})
|
||||
assert result["metadata"]["trail"] == {
|
||||
"name": "trail-1",
|
||||
"region": "us-east-1",
|
||||
}
|
||||
|
||||
def test_pydantic_v1_dict(self):
|
||||
class FakeV1Model:
|
||||
def dict(self):
|
||||
return {"name": "policy-1", "schedule": "daily"}
|
||||
|
||||
result = _sanitize_resource_data("d", {"policy": FakeV1Model()})
|
||||
assert result["metadata"]["policy"] == {
|
||||
"name": "policy-1",
|
||||
"schedule": "daily",
|
||||
}
|
||||
|
||||
def test_nested_pydantic_in_list(self):
|
||||
class FakeModel:
|
||||
def model_dump(self):
|
||||
return {"id": "x"}
|
||||
|
||||
result = _sanitize_resource_data("d", {"items": [FakeModel(), FakeModel()]})
|
||||
assert result["metadata"]["items"] == [{"id": "x"}, {"id": "x"}]
|
||||
|
||||
def test_nested_dict_recursion(self):
|
||||
class FakeInner:
|
||||
def model_dump(self):
|
||||
return {"k": "v"}
|
||||
|
||||
result = _sanitize_resource_data(
|
||||
"d", {"outer": {"inner": FakeInner(), "x": [1, 2]}}
|
||||
)
|
||||
assert result["metadata"]["outer"]["inner"] == {"k": "v"}
|
||||
assert result["metadata"]["outer"]["x"] == [1, 2]
|
||||
|
||||
def test_tuple_to_list(self):
|
||||
result = _sanitize_resource_data("d", {"t": (1, 2, "three")})
|
||||
assert result["metadata"]["t"] == [1, 2, "three"]
|
||||
|
||||
def test_non_string_dict_keys_coerced(self):
|
||||
result = _sanitize_resource_data("d", {1: "a", 2: "b"})
|
||||
assert result["metadata"] == {"1": "a", "2": "b"}
|
||||
|
||||
def test_unknown_object_falls_back_to_str(self):
|
||||
class Opaque:
|
||||
def __str__(self):
|
||||
return "opaque-repr"
|
||||
|
||||
result = _sanitize_resource_data("d", {"thing": Opaque()})
|
||||
assert result["metadata"]["thing"] == "opaque-repr"
|
||||
|
||||
def test_circular_reference_falls_back_to_empty(self):
|
||||
a = {}
|
||||
a["self"] = a
|
||||
# json.dumps raises ValueError on recursion → fallback to empty metadata
|
||||
result = _sanitize_resource_data("d", a)
|
||||
assert result == {"details": "d", "metadata": {}}
|
||||
|
||||
def test_serializes_via_full_finding_pipeline(self):
|
||||
"""End-to-end: a finding with a non-serializable resource_metadata
|
||||
produces a JSON-serializable ComplianceFinding."""
|
||||
|
||||
class TrailLike:
|
||||
def __init__(self):
|
||||
self.name = "trail-A"
|
||||
self.kms_key_id = "arn:aws:kms:..."
|
||||
|
||||
def model_dump(self):
|
||||
return {"name": self.name, "kms_key_id": self.kms_key_id}
|
||||
|
||||
finding = _make_finding("check_a")
|
||||
finding.resource_metadata = {"trail": TrailLike()}
|
||||
req = _simple_requirement()
|
||||
fw = _make_framework([req])
|
||||
|
||||
output = OCSFComplianceOutput(findings=[finding], framework=fw, provider="aws")
|
||||
|
||||
# Serialize the resulting ComplianceFinding — must NOT raise
|
||||
cf = output.data[0]
|
||||
if hasattr(cf, "model_dump_json"):
|
||||
json_output = cf.model_dump_json(exclude_none=True)
|
||||
else:
|
||||
json_output = cf.json(exclude_none=True)
|
||||
payload = json.loads(json_output)
|
||||
|
||||
# Confirm the trail object made it through as a plain dict
|
||||
assert payload["resources"][0]["data"]["metadata"]["trail"]["name"] == "trail-A"
|
||||
|
||||
|
||||
class TestEventStatusInline:
|
||||
"""Tests for the inlined event_status logic that replaced
|
||||
OCSF.get_finding_status_id() to break the cyclic import."""
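The behavior these tests lock in (a sketch inferred from the assertions below; variable names are illustrative, not the exact source) boils down to:

# A muted finding maps to Suppressed, anything else to New; both the numeric
# value and the name are written to the ComplianceFinding.
event_status = EventStatusID.Suppressed if finding.muted else EventStatusID.New
compliance_finding.status_id = event_status.value
compliance_finding.status = event_status.name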
|
||||
|
||||
def test_unmuted_finding_status_new(self):
|
||||
finding = _make_finding("check_a")
|
||||
finding.muted = False
|
||||
req = _simple_requirement()
|
||||
fw = _make_framework([req])
|
||||
|
||||
output = OCSFComplianceOutput(findings=[finding], framework=fw, provider="aws")
|
||||
cf = output.data[0]
|
||||
|
||||
assert cf.status_id == EventStatusID.New.value
|
||||
assert cf.status == EventStatusID.New.name
|
||||
|
||||
def test_muted_finding_status_suppressed(self):
|
||||
finding = _make_finding("check_a")
|
||||
finding.muted = True
|
||||
req = _simple_requirement()
|
||||
fw = _make_framework([req])
|
||||
|
||||
output = OCSFComplianceOutput(findings=[finding], framework=fw, provider="aws")
|
||||
cf = output.data[0]
|
||||
|
||||
assert cf.status_id == EventStatusID.Suppressed.value
|
||||
assert cf.status == EventStatusID.Suppressed.name
|
||||
|
||||
|
||||
class TestNoTopLevelOCSFImport:
|
||||
"""Regression test: the top-level OCSF/Finding imports were removed
|
||||
to break the CodeQL cyclic-import warnings. Ensure they stay out of
|
||||
the runtime namespace of the module (TYPE_CHECKING block only)."""
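The idiom this regression test protects (a generic sketch of the TYPE_CHECKING pattern; the import path shown is an assumption) is:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Resolved only by type checkers, so the names never enter the module's
    # runtime namespace and the import cycle stays broken.
    from prowler.lib.outputs.finding import Finding  # hypothetical path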
|
||||
|
||||
def test_finding_not_in_runtime_namespace(self):
|
||||
import prowler.lib.outputs.compliance.universal.ocsf_compliance as mod
|
||||
|
||||
assert "Finding" not in dir(mod)
|
||||
|
||||
def test_ocsf_class_not_imported(self):
|
||||
import prowler.lib.outputs.compliance.universal.ocsf_compliance as mod
|
||||
|
||||
assert "OCSF" not in dir(mod)
|
||||
|
||||
@@ -0,0 +1,568 @@
|
||||
from types import SimpleNamespace
|
||||
|
||||
from prowler.lib.check.compliance_models import (
|
||||
AttributeMetadata,
|
||||
ComplianceFramework,
|
||||
OutputFormats,
|
||||
OutputsConfig,
|
||||
TableConfig,
|
||||
UniversalComplianceRequirement,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.universal.universal_output import (
|
||||
UniversalComplianceOutput,
|
||||
)
|
||||
|
||||
|
||||
def _make_finding(check_id, status="PASS", compliance_map=None):
|
||||
"""Create a mock Finding for output tests."""
|
||||
finding = SimpleNamespace()
|
||||
finding.provider = "aws"
|
||||
finding.account_uid = "123456789012"
|
||||
finding.account_name = "test-account"
|
||||
finding.region = "us-east-1"
|
||||
finding.status = status
|
||||
finding.status_extended = f"{check_id} is {status}"
|
||||
finding.resource_uid = f"arn:aws:iam::123456789012:{check_id}"
|
||||
finding.resource_name = check_id
|
||||
finding.muted = False
|
||||
finding.check_id = check_id
|
||||
finding.metadata = SimpleNamespace(
|
||||
Provider="aws",
|
||||
CheckID=check_id,
|
||||
Severity="medium",
|
||||
)
|
||||
finding.compliance = compliance_map or {}
|
||||
return finding
|
||||
|
||||
|
||||
def _make_framework(requirements, attrs_metadata=None, table_config=None):
|
||||
return ComplianceFramework(
|
||||
framework="TestFW",
|
||||
name="Test Framework",
|
||||
provider="AWS",
|
||||
version="1.0",
|
||||
description="Test framework",
|
||||
requirements=requirements,
|
||||
attributes_metadata=attrs_metadata,
|
||||
outputs=OutputsConfig(table_config=table_config) if table_config else None,
|
||||
)
|
||||
|
||||
|
||||
class TestDynamicCSVColumns:
|
||||
def test_columns_match_metadata(self, tmp_path):
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={"Section": "IAM", "SubSection": "Auth"},
|
||||
checks={"aws": ["check_a"]},
|
||||
),
|
||||
]
|
||||
metadata = [
|
||||
AttributeMetadata(key="Section", type="str"),
|
||||
AttributeMetadata(key="SubSection", type="str"),
|
||||
]
|
||||
fw = _make_framework(reqs, metadata, TableConfig(group_by="Section"))
|
||||
|
||||
findings = [
|
||||
_make_finding("check_a", "PASS", {"TestFW-1.0": ["1.1"]}),
|
||||
]
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
)
|
||||
|
||||
assert len(output.data) == 1
|
||||
row_dict = output.data[0].dict()
|
||||
assert "Requirements_Attributes_Section" in row_dict
|
||||
assert "Requirements_Attributes_SubSection" in row_dict
|
||||
assert row_dict["Requirements_Attributes_Section"] == "IAM"
|
||||
assert row_dict["Requirements_Attributes_SubSection"] == "Auth"
|
||||
|
||||
|
||||
class TestManualRequirements:
|
||||
def test_manual_status(self, tmp_path):
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={"Section": "IAM"},
|
||||
checks={"aws": ["check_a"]},
|
||||
),
|
||||
UniversalComplianceRequirement(
|
||||
id="manual-1",
|
||||
description="manual check",
|
||||
attributes={"Section": "Governance"},
|
||||
checks={},
|
||||
),
|
||||
]
|
||||
metadata = [
|
||||
AttributeMetadata(key="Section", type="str"),
|
||||
]
|
||||
fw = _make_framework(reqs, metadata, TableConfig(group_by="Section"))
|
||||
|
||||
findings = [
|
||||
_make_finding("check_a", "PASS", {"TestFW-1.0": ["1.1"]}),
|
||||
]
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
)
|
||||
|
||||
# Should have 1 real finding + 1 manual
|
||||
assert len(output.data) == 2
|
||||
manual_rows = [r for r in output.data if r.dict()["Status"] == "MANUAL"]
|
||||
assert len(manual_rows) == 1
|
||||
assert manual_rows[0].dict()["Requirements_Id"] == "manual-1"
|
||||
assert manual_rows[0].dict()["ResourceId"] == "manual_check"
|
||||
|
||||
|
||||
class TestMITREExtraColumns:
|
||||
def test_mitre_columns_present(self, tmp_path):
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="T1190",
|
||||
description="Exploit",
|
||||
attributes={},
|
||||
checks={"aws": ["check_a"]},
|
||||
tactics=["Initial Access"],
|
||||
sub_techniques=[],
|
||||
platforms=["IaaS"],
|
||||
technique_url="https://attack.mitre.org/techniques/T1190/",
|
||||
),
|
||||
]
|
||||
fw = _make_framework(reqs, None, TableConfig(group_by="_Tactics"))
|
||||
|
||||
findings = [
|
||||
_make_finding("check_a", "PASS", {"TestFW-1.0": ["T1190"]}),
|
||||
]
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
)
|
||||
|
||||
assert len(output.data) == 1
|
||||
row_dict = output.data[0].dict()
|
||||
assert "Requirements_Tactics" in row_dict
|
||||
assert row_dict["Requirements_Tactics"] == "Initial Access"
|
||||
assert "Requirements_TechniqueURL" in row_dict
|
||||
|
||||
|
||||
class TestCSVFileWrite:
|
||||
def test_batch_write(self, tmp_path):
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={"Section": "IAM"},
|
||||
checks={"aws": ["check_a"]},
|
||||
),
|
||||
]
|
||||
metadata = [
|
||||
AttributeMetadata(key="Section", type="str"),
|
||||
]
|
||||
fw = _make_framework(reqs, metadata, TableConfig(group_by="Section"))
|
||||
|
||||
findings = [
|
||||
_make_finding("check_a", "PASS", {"TestFW-1.0": ["1.1"]}),
|
||||
]
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
)
|
||||
output.batch_write_data_to_file()
|
||||
|
||||
# Verify file was created and has content
|
||||
with open(filepath, "r") as f:
|
||||
content = f.read()
|
||||
assert "PROVIDER" in content # Headers are uppercase
|
||||
assert "REQUIREMENTS_ATTRIBUTES_SECTION" in content
|
||||
assert "IAM" in content
|
||||
|
||||
|
||||
class TestNoFindings:
|
||||
def test_empty_findings_no_data(self, tmp_path):
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={"Section": "IAM"},
|
||||
checks={"aws": ["check_a"]},
|
||||
),
|
||||
]
|
||||
fw = _make_framework(reqs, None, TableConfig(group_by="Section"))
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=[],
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
)
|
||||
assert len(output.data) == 0
|
||||
|
||||
|
||||
class TestMultiProviderOutput:
|
||||
def test_dict_checks_filtered_by_provider(self, tmp_path):
|
||||
"""Only checks for the given provider appear in CSV output."""
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={"Section": "IAM"},
|
||||
checks={"aws": ["check_a"], "azure": ["check_b"]},
|
||||
),
|
||||
]
|
||||
metadata = [
|
||||
AttributeMetadata(key="Section", type="str"),
|
||||
]
|
||||
fw = ComplianceFramework(
|
||||
framework="MultiCloud",
|
||||
name="Multi",
|
||||
version="1.0",
|
||||
description="Test multi-provider",
|
||||
requirements=reqs,
|
||||
attributes_metadata=metadata,
|
||||
outputs=OutputsConfig(table_config=TableConfig(group_by="Section")),
|
||||
)
|
||||
|
||||
findings = [
|
||||
_make_finding("check_a", "PASS", {"MultiCloud-1.0": ["1.1"]}),
|
||||
_make_finding("check_b", "FAIL", {"MultiCloud-1.0": ["1.1"]}),
|
||||
]
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
provider="aws",
|
||||
)
|
||||
|
||||
# Only check_a should match (it's the AWS check)
|
||||
assert len(output.data) == 1
|
||||
row_dict = output.data[0].dict()
|
||||
assert row_dict["Requirements_Attributes_Section"] == "IAM"
|
||||
|
||||
def test_no_provider_includes_all(self, tmp_path):
|
||||
"""Without provider filter, all checks from all providers are included."""
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={"Section": "IAM"},
|
||||
checks={"aws": ["check_a"], "azure": ["check_b"]},
|
||||
),
|
||||
]
|
||||
metadata = [
|
||||
AttributeMetadata(key="Section", type="str"),
|
||||
]
|
||||
fw = ComplianceFramework(
|
||||
framework="MultiCloud",
|
||||
name="Multi",
|
||||
version="1.0",
|
||||
description="Test multi-provider",
|
||||
requirements=reqs,
|
||||
attributes_metadata=metadata,
|
||||
outputs=OutputsConfig(table_config=TableConfig(group_by="Section")),
|
||||
)
|
||||
|
||||
findings = [
|
||||
_make_finding("check_a", "PASS", {"MultiCloud-1.0": ["1.1"]}),
|
||||
_make_finding("check_b", "FAIL", {"MultiCloud-1.0": ["1.1"]}),
|
||||
]
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
)
|
||||
|
||||
# Both checks should be included without provider filter
|
||||
assert len(output.data) == 2
|
||||
|
||||
def test_empty_dict_checks_is_manual(self, tmp_path):
|
||||
"""Requirement with empty dict checks is treated as manual."""
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="manual-1",
|
||||
description="manual check",
|
||||
attributes={"Section": "Governance"},
|
||||
checks={},
|
||||
),
|
||||
]
|
||||
metadata = [
|
||||
AttributeMetadata(key="Section", type="str"),
|
||||
]
|
||||
fw = ComplianceFramework(
|
||||
framework="MultiCloud",
|
||||
name="Multi",
|
||||
version="1.0",
|
||||
description="Test",
|
||||
requirements=reqs,
|
||||
attributes_metadata=metadata,
|
||||
outputs=OutputsConfig(table_config=TableConfig(group_by="Section")),
|
||||
)
|
||||
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=[_make_finding("other_check", "PASS", {})],
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
provider="aws",
|
||||
)
|
||||
|
||||
manual_rows = [r for r in output.data if r.dict()["Status"] == "MANUAL"]
|
||||
assert len(manual_rows) == 1
|
||||
assert manual_rows[0].dict()["Requirements_Id"] == "manual-1"
|
||||
|
||||
|
||||
class TestCSVExclude:
|
||||
def test_csv_false_excludes_column(self, tmp_path):
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={"Section": "IAM", "Internal": "hidden"},
|
||||
checks={"aws": ["check_a"]},
|
||||
),
|
||||
]
|
||||
metadata = [
|
||||
AttributeMetadata(
|
||||
key="Section", type="str", output_formats=OutputFormats(csv=True)
|
||||
),
|
||||
AttributeMetadata(
|
||||
key="Internal", type="str", output_formats=OutputFormats(csv=False)
|
||||
),
|
||||
]
|
||||
fw = _make_framework(reqs, metadata, TableConfig(group_by="Section"))
|
||||
|
||||
findings = [
|
||||
_make_finding("check_a", "PASS", {"TestFW-1.0": ["1.1"]}),
|
||||
]
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
)
|
||||
|
||||
row_dict = output.data[0].dict()
|
||||
assert "Requirements_Attributes_Section" in row_dict
|
||||
assert "Requirements_Attributes_Internal" not in row_dict
|
||||
|
||||
|
||||
def _make_provider_finding(provider, check_id="check_a", status="PASS"):
|
||||
"""Create a mock Finding with a specific provider."""
|
||||
finding = _make_finding(check_id, status, {"TestFW-1.0": ["1.1"]})
|
||||
finding.provider = provider
|
||||
return finding
|
||||
|
||||
|
||||
def _simple_framework():
|
||||
all_providers = [
|
||||
"aws",
|
||||
"azure",
|
||||
"gcp",
|
||||
"kubernetes",
|
||||
"m365",
|
||||
"github",
|
||||
"oraclecloud",
|
||||
"alibabacloud",
|
||||
"nhn",
|
||||
"unknown",
|
||||
]
|
||||
reqs = [
|
||||
UniversalComplianceRequirement(
|
||||
id="1.1",
|
||||
description="test",
|
||||
attributes={"Section": "IAM"},
|
||||
checks={p: ["check_a"] for p in all_providers},
|
||||
),
|
||||
]
|
||||
metadata = [
|
||||
AttributeMetadata(key="Section", type="str"),
|
||||
]
|
||||
return _make_framework(reqs, metadata, TableConfig(group_by="Section"))
|
||||
|
||||
|
||||
class TestProviderHeaders:
|
||||
def test_aws_headers(self, tmp_path):
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding("aws")]
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=str(tmp_path / "test.csv"),
|
||||
provider="aws",
|
||||
)
|
||||
row_dict = output.data[0].dict()
|
||||
assert "AccountId" in row_dict
|
||||
assert "Region" in row_dict
|
||||
assert row_dict["AccountId"] == "123456789012"
|
||||
assert row_dict["Region"] == "us-east-1"
|
||||
|
||||
def test_azure_headers(self, tmp_path):
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding("azure")]
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=str(tmp_path / "test.csv"),
|
||||
provider="azure",
|
||||
)
|
||||
row_dict = output.data[0].dict()
|
||||
assert "SubscriptionId" in row_dict
|
||||
assert "Location" in row_dict
|
||||
assert row_dict["SubscriptionId"] == "123456789012"
|
||||
assert row_dict["Location"] == "us-east-1"
|
||||
|
||||
def test_gcp_headers(self, tmp_path):
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding("gcp")]
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=str(tmp_path / "test.csv"),
|
||||
provider="gcp",
|
||||
)
|
||||
row_dict = output.data[0].dict()
|
||||
assert "ProjectId" in row_dict
|
||||
assert "Location" in row_dict
|
||||
assert row_dict["ProjectId"] == "123456789012"
|
||||
|
||||
def test_kubernetes_headers(self, tmp_path):
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding("kubernetes")]
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=str(tmp_path / "test.csv"),
|
||||
provider="kubernetes",
|
||||
)
|
||||
row_dict = output.data[0].dict()
|
||||
assert "Context" in row_dict
|
||||
assert "Namespace" in row_dict
|
||||
# Kubernetes Context maps to account_name
|
||||
assert row_dict["Context"] == "test-account"
|
||||
assert row_dict["Namespace"] == "us-east-1"
|
||||
|
||||
def test_github_headers(self, tmp_path):
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding("github")]
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=str(tmp_path / "test.csv"),
|
||||
provider="github",
|
||||
)
|
||||
row_dict = output.data[0].dict()
|
||||
assert "Account_Name" in row_dict
|
||||
assert "Account_Id" in row_dict
|
||||
# GitHub: Account_Name (pos 3) from account_name, Account_Id (pos 4) from account_uid
|
||||
assert row_dict["Account_Name"] == "test-account"
|
||||
assert row_dict["Account_Id"] == "123456789012"
|
||||
# Verify column order matches legacy (Account_Name before Account_Id)
|
||||
keys = list(row_dict.keys())
|
||||
assert keys.index("Account_Name") < keys.index("Account_Id")
|
||||
|
||||
def test_unknown_provider_defaults(self, tmp_path):
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding("unknown")]
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=str(tmp_path / "test.csv"),
|
||||
provider="unknown",
|
||||
)
|
||||
row_dict = output.data[0].dict()
|
||||
assert "AccountId" in row_dict
|
||||
assert "Region" in row_dict
|
||||
|
||||
def test_none_provider_defaults(self, tmp_path):
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding("aws")]
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=str(tmp_path / "test.csv"),
|
||||
)
|
||||
row_dict = output.data[0].dict()
|
||||
assert "AccountId" in row_dict
|
||||
assert "Region" in row_dict
|
||||
|
||||
def test_csv_write_azure_headers(self, tmp_path):
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding("azure")]
|
||||
filepath = str(tmp_path / "test.csv")
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=filepath,
|
||||
provider="azure",
|
||||
)
|
||||
output.batch_write_data_to_file()
|
||||
|
||||
with open(filepath, "r") as f:
|
||||
content = f.read()
|
||||
assert "SUBSCRIPTIONID" in content
|
||||
assert "LOCATION" in content
|
||||
# Should NOT have the default AccountId/Region headers
|
||||
assert "ACCOUNTID" not in content
|
||||
|
||||
def test_column_order_matches_legacy(self, tmp_path):
|
||||
"""Verify that the base column order matches the legacy per-provider models.
|
||||
|
||||
Legacy models all define: Provider, Description, <col3>, <col4>, AssessmentDate, ...
|
||||
The universal output must preserve this exact order for backward compatibility.
|
||||
"""
|
||||
# Expected column order per provider (positions 3 and 4 after Provider, Description)
|
||||
legacy_order = {
|
||||
"aws": ("AccountId", "Region"),
|
||||
"azure": ("SubscriptionId", "Location"),
|
||||
"gcp": ("ProjectId", "Location"),
|
||||
"kubernetes": ("Context", "Namespace"),
|
||||
"m365": ("TenantId", "Location"),
|
||||
"github": ("Account_Name", "Account_Id"),
|
||||
"oraclecloud": ("TenancyId", "Region"),
|
||||
"alibabacloud": ("AccountId", "Region"),
|
||||
"nhn": ("AccountId", "Region"),
|
||||
}
|
||||
|
||||
for provider_name, (expected_col3, expected_col4) in legacy_order.items():
|
||||
fw = _simple_framework()
|
||||
findings = [_make_provider_finding(provider_name)]
|
||||
output = UniversalComplianceOutput(
|
||||
findings=findings,
|
||||
framework=fw,
|
||||
file_path=str(tmp_path / f"test_{provider_name}.csv"),
|
||||
provider=provider_name,
|
||||
)
|
||||
keys = list(output.data[0].dict().keys())
|
||||
assert keys[0] == "Provider", f"{provider_name}: col 1 should be Provider"
|
||||
assert (
|
||||
keys[1] == "Description"
|
||||
), f"{provider_name}: col 2 should be Description"
|
||||
assert (
|
||||
keys[2] == expected_col3
|
||||
), f"{provider_name}: col 3 should be {expected_col3}, got {keys[2]}"
|
||||
assert (
|
||||
keys[3] == expected_col4
|
||||
), f"{provider_name}: col 4 should be {expected_col4}, got {keys[3]}"
|
||||
assert (
|
||||
keys[4] == "AssessmentDate"
|
||||
), f"{provider_name}: col 5 should be AssessmentDate"
|
||||
@@ -557,7 +557,7 @@ class TestFinding:
|
||||
assert finding_output.resource_tags == {}
|
||||
assert finding_output.partition is None
|
||||
assert finding_output.account_uid == "test_cluster"
|
||||
-assert finding_output.provider_uid == "In-Cluster"
+assert finding_output.provider_uid == "test_cluster"
|
||||
assert finding_output.account_name == "context: In-Cluster"
|
||||
assert finding_output.account_email is None
|
||||
assert finding_output.account_organization_uid is None
|
||||
@@ -591,6 +591,40 @@ class TestFinding:
|
||||
assert finding_output.metadata.Notes == "mock_notes"
|
||||
assert finding_output.metadata.Compliance == []
|
||||
|
||||
def test_generate_output_kubernetes_kubeconfig(self):
|
||||
# Mock provider
|
||||
provider = MagicMock()
|
||||
provider.type = "kubernetes"
|
||||
provider.identity.context = "test-context"
|
||||
provider.identity.cluster = "test_cluster"
|
||||
|
||||
# Mock check result
|
||||
check_output = MagicMock()
|
||||
check_output.resource_name = "test_resource_name"
|
||||
check_output.resource_id = "test_resource_id"
|
||||
check_output.namespace = "test_namespace"
|
||||
check_output.resource_details = "test_resource_details"
|
||||
check_output.status = Status.PASS
|
||||
check_output.status_extended = "mock_status_extended"
|
||||
check_output.muted = False
|
||||
check_output.check_metadata = mock_check_metadata(provider="kubernetes")
|
||||
check_output.timestamp = datetime.now()
|
||||
check_output.resource = {}
|
||||
check_output.compliance = {}
|
||||
|
||||
# Mock Output Options
|
||||
output_options = MagicMock()
|
||||
output_options.unix_timestamp = True
|
||||
|
||||
# Generate the finding
|
||||
finding_output = Finding.generate_output(provider, check_output, output_options)
|
||||
|
||||
assert isinstance(finding_output, Finding)
|
||||
assert finding_output.auth_method == "kubeconfig"
|
||||
assert finding_output.account_uid == "test_cluster"
|
||||
assert finding_output.provider_uid == "test-context"
|
||||
assert finding_output.account_name == "context: test-context"
|
||||
|
||||
def test_generate_output_github_personal_access_token(self):
|
||||
"""Test GitHub output generation with Personal Access Token authentication."""
|
||||
# Mock provider using Personal Access Token
|
||||
|
||||
@@ -21,6 +21,7 @@ from prowler.providers.aws.config import (
|
||||
AWS_STS_GLOBAL_ENDPOINT_REGION,
|
||||
BOTO3_USER_AGENT_EXTRA,
|
||||
ROLE_SESSION_NAME,
|
||||
get_default_session_config,
|
||||
)
|
||||
from prowler.providers.aws.exceptions.exceptions import (
|
||||
AWSArgumentTypeValidationError,
|
||||
@@ -2242,6 +2243,12 @@ aws:
|
||||
assert session_config.user_agent_extra == BOTO3_USER_AGENT_EXTRA
|
||||
assert session_config.retries == {"max_attempts": 10, "mode": "standard"}
|
||||
|
||||
def test_get_default_session_config(self):
|
||||
config = get_default_session_config()
|
||||
|
||||
assert config.user_agent_extra == BOTO3_USER_AGENT_EXTRA
|
||||
assert config.retries == {"max_attempts": 3, "mode": "standard"}
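Based on the assertions above, the default session config is presumably built with botocore's Config along these lines (a hedged sketch showing only the asserted fields):

from botocore.config import Config

from prowler.providers.aws.config import BOTO3_USER_AGENT_EXTRA


def get_default_session_config() -> Config:
    # Matches the assertions above: shared user-agent suffix plus standard
    # retry mode with 3 attempts.
    return Config(
        user_agent_extra=BOTO3_USER_AGENT_EXTRA,
        retries={"max_attempts": 3, "mode": "standard"},
    )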
|
||||
|
||||
@mock_aws
|
||||
@patch(
|
||||
"prowler.lib.check.utils.recover_checks_from_provider",
|
||||
|
||||
@@ -0,0 +1,46 @@
import pytest
from unittest.mock import patch

from moto import mock_aws


@pytest.fixture(autouse=True)
def _mock_aws_globally():
    """Activate moto's mock_aws for every test under tests/providers/aws/.

    This prevents any test from accidentally hitting real AWS endpoints,
    even if it forgets to add @mock_aws on the method. Tests that never
    call boto3 are unaffected (mock_aws is a no-op in that case).
    """
    with mock_aws():
        yield


@pytest.fixture(autouse=True)
def _detect_aws_leaks():
    """Fail the test if any HTTP request reaches a real AWS endpoint."""
    calls = []
    original_send = None

    try:
        from botocore.httpsession import URLLib3Session

        original_send = URLLib3Session.send
    except ImportError:
        yield
        return

    def tracking_send(self, request):
        url = getattr(request, "url", str(request))
        if ".amazonaws.com" in url:
            calls.append(url)
        return original_send(self, request)

    with patch.object(URLLib3Session, "send", tracking_send):
        yield

    if calls:
        pytest.fail(
            f"Test leaked {len(calls)} real AWS call(s):\n"
            + "\n".join(f" - {url}" for url in calls[:5])
        )
@@ -4,6 +4,8 @@ import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
from moto import mock_aws
|
||||
|
||||
from prowler.providers.aws.aws_provider import AwsProvider
|
||||
from prowler.providers.aws.config import BOTO3_USER_AGENT_EXTRA
|
||||
from prowler.providers.aws.lib.organizations.organizations import (
|
||||
_get_ou_metadata,
|
||||
get_organizations_metadata,
|
||||
@@ -222,6 +224,20 @@ class Test_AWS_Organizations:
|
||||
assert tags == {}
|
||||
assert ou_metadata == {}
|
||||
|
||||
def test_get_organizations_metadata_uses_user_agent_extra(self):
|
||||
real_session = boto3.Session()
|
||||
real_session._session.set_default_client_config(
|
||||
AwsProvider.set_session_config(None)
|
||||
)
|
||||
wrapper = MagicMock(wraps=real_session)
|
||||
|
||||
get_organizations_metadata("123456789012", wrapper)
|
||||
|
||||
wrapper.client.assert_called_once()
|
||||
default_config = real_session._session.get_default_client_config()
|
||||
assert default_config is not None
|
||||
assert BOTO3_USER_AGENT_EXTRA in default_config.user_agent_extra
|
||||
|
||||
def test_parse_organizations_metadata_with_empty_ou_metadata(self):
|
||||
tags = {"Tags": []}
|
||||
metadata = {
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from mock import patch
|
||||
|
||||
from prowler.providers.aws.config import BOTO3_USER_AGENT_EXTRA
|
||||
from prowler.providers.aws.lib.service.service import AWSService
|
||||
from tests.providers.aws.utils import (
|
||||
AWS_ACCOUNT_ARN,
|
||||
@@ -189,6 +190,15 @@ class TestAWSService:
|
||||
== f"arn:{service.audited_partition}:{service_name}::{AWS_ACCOUNT_NUMBER}:bucket/unknown"
|
||||
)
|
||||
|
||||
def test_AWSService_clients_carry_user_agent_extra(self):
|
||||
provider = set_mocked_aws_provider()
|
||||
|
||||
service = AWSService("s3", provider)
|
||||
ad_hoc_client = service.session.client("ec2", AWS_REGION_US_EAST_1)
|
||||
|
||||
assert BOTO3_USER_AGENT_EXTRA in service.client._client_config.user_agent_extra
|
||||
assert BOTO3_USER_AGENT_EXTRA in ad_hoc_client._client_config.user_agent_extra
|
||||
|
||||
def test_AWSService_get_unknown_arn_resource_type_set_region(self):
|
||||
service_name = "s3"
|
||||
provider = set_mocked_aws_provider()
|
||||
|
||||
@@ -0,0 +1,302 @@
|
||||
from unittest import mock
|
||||
|
||||
import botocore
|
||||
from botocore.exceptions import ClientError
|
||||
from moto import mock_aws
|
||||
|
||||
from tests.providers.aws.utils import (
|
||||
AWS_ACCOUNT_NUMBER,
|
||||
AWS_REGION_EU_WEST_1,
|
||||
AWS_REGION_US_EAST_1,
|
||||
set_mocked_aws_provider,
|
||||
)
|
||||
|
||||
make_api_call = botocore.client.BaseClient._make_api_call
|
||||
|
||||
GUARDRAIL_ARN = (
|
||||
f"arn:aws:bedrock:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:guardrail/test-id"
|
||||
)
|
||||
|
||||
|
||||
def mock_make_api_call_with_guardrail(self, operation_name, kwarg):
|
||||
"""Mock API call returning one guardrail in us-east-1."""
|
||||
if operation_name == "ListGuardrails":
|
||||
return {
|
||||
"guardrails": [
|
||||
{
|
||||
"id": "test-id",
|
||||
"arn": GUARDRAIL_ARN,
|
||||
"status": "READY",
|
||||
"name": "test-guardrail",
|
||||
}
|
||||
]
|
||||
}
|
||||
elif operation_name == "GetGuardrail":
|
||||
return {
|
||||
"name": "test-guardrail",
|
||||
"guardrailId": "test-id",
|
||||
"guardrailArn": GUARDRAIL_ARN,
|
||||
"status": "READY",
|
||||
"blockedInputMessaging": "Blocked",
|
||||
"blockedOutputsMessaging": "Blocked",
|
||||
"contentPolicy": {"filters": []},
|
||||
}
|
||||
return make_api_call(self, operation_name, kwarg)
|
||||
|
||||
|
||||
def mock_make_api_call_without_guardrails(self, operation_name, kwarg):
|
||||
"""Mock API call returning no guardrails."""
|
||||
if operation_name == "ListGuardrails":
|
||||
return {"guardrails": []}
|
||||
return make_api_call(self, operation_name, kwarg)
|
||||
|
||||
|
||||
def mock_make_api_call_guardrails_only_in_us_east_1(self, operation_name, kwarg):
|
||||
"""Mock API call returning a guardrail only in us-east-1 and none elsewhere."""
|
||||
if operation_name == "ListGuardrails":
|
||||
if self.meta.region_name == AWS_REGION_US_EAST_1:
|
||||
return {
|
||||
"guardrails": [
|
||||
{
|
||||
"id": "test-id",
|
||||
"arn": GUARDRAIL_ARN,
|
||||
"status": "READY",
|
||||
"name": "test-guardrail",
|
||||
}
|
||||
]
|
||||
}
|
||||
return {"guardrails": []}
|
||||
elif operation_name == "GetGuardrail":
|
||||
return {
|
||||
"name": "test-guardrail",
|
||||
"guardrailId": "test-id",
|
||||
"guardrailArn": GUARDRAIL_ARN,
|
||||
"status": "READY",
|
||||
"blockedInputMessaging": "Blocked",
|
||||
"blockedOutputsMessaging": "Blocked",
|
||||
"contentPolicy": {"filters": []},
|
||||
}
|
||||
return make_api_call(self, operation_name, kwarg)
|
||||
|
||||
|
||||
def mock_make_api_call_guardrail_validation_exception(self, operation_name, kwarg):
|
||||
"""Mock API call raising ValidationException for ListGuardrails."""
|
||||
if operation_name == "ListGuardrails":
|
||||
raise ClientError(
|
||||
{
|
||||
"Error": {
|
||||
"Code": "ValidationException",
|
||||
"Message": "Guardrails are not supported in this region.",
|
||||
}
|
||||
},
|
||||
operation_name,
|
||||
)
|
||||
return make_api_call(self, operation_name, kwarg)
|
||||
|
||||
|
||||
class Test_bedrock_guardrails_configured:
|
||||
@mock.patch(
|
||||
"botocore.client.BaseClient._make_api_call",
|
||||
new=mock_make_api_call_without_guardrails,
|
||||
)
|
||||
@mock_aws
|
||||
def test_no_guardrails_single_region(self):
|
||||
"""Test FAIL when no guardrails are configured in a single region."""
|
||||
from prowler.providers.aws.services.bedrock.bedrock_service import Bedrock
|
||||
|
||||
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
|
||||
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=aws_provider,
|
||||
),
|
||||
mock.patch(
|
||||
"prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured.bedrock_client",
|
||||
new=Bedrock(aws_provider),
|
||||
),
|
||||
):
|
||||
from prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured import (
|
||||
bedrock_guardrails_configured,
|
||||
)
|
||||
|
||||
check = bedrock_guardrails_configured()
|
||||
result = check.execute()
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0].status == "FAIL"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"Bedrock has no guardrails configured in region {AWS_REGION_US_EAST_1}."
|
||||
)
|
||||
assert result[0].resource_id == "bedrock-guardrails"
|
||||
assert (
|
||||
result[0].resource_arn
|
||||
== f"arn:aws:bedrock:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:guardrails"
|
||||
)
|
||||
assert result[0].region == AWS_REGION_US_EAST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
@mock_aws
|
||||
@mock.patch(
|
||||
"botocore.client.BaseClient._make_api_call",
|
||||
new=mock_make_api_call_without_guardrails,
|
||||
)
|
||||
def test_no_guardrails_multi_region(self):
|
||||
"""Test FAIL in both regions when no guardrails are configured."""
|
||||
from prowler.providers.aws.services.bedrock.bedrock_service import Bedrock
|
||||
|
||||
aws_provider = set_mocked_aws_provider(
|
||||
[AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1]
|
||||
)
|
||||
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=aws_provider,
|
||||
),
|
||||
mock.patch(
|
||||
"prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured.bedrock_client",
|
||||
new=Bedrock(aws_provider),
|
||||
),
|
||||
):
|
||||
from prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured import (
|
||||
bedrock_guardrails_configured,
|
||||
)
|
||||
|
||||
check = bedrock_guardrails_configured()
|
||||
result = check.execute()
|
||||
|
||||
assert len(result) == 2
|
||||
assert result[0].status == "FAIL"
|
||||
assert result[0].resource_id == "bedrock-guardrails"
|
||||
assert result[0].resource_tags == []
|
||||
assert result[1].status == "FAIL"
|
||||
assert result[1].resource_id == "bedrock-guardrails"
|
||||
assert result[1].resource_tags == []
|
||||
|
||||
@mock.patch(
|
||||
"botocore.client.BaseClient._make_api_call",
|
||||
new=mock_make_api_call_with_guardrail,
|
||||
)
|
||||
@mock_aws
|
||||
def test_guardrail_configured(self):
|
||||
"""Test PASS when at least one guardrail is configured in the region."""
|
||||
from prowler.providers.aws.services.bedrock.bedrock_service import Bedrock
|
||||
|
||||
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
|
||||
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=aws_provider,
|
||||
),
|
||||
mock.patch(
|
||||
"prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured.bedrock_client",
|
||||
new=Bedrock(aws_provider),
|
||||
),
|
||||
):
|
||||
from prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured import (
|
||||
bedrock_guardrails_configured,
|
||||
)
|
||||
|
||||
check = bedrock_guardrails_configured()
|
||||
result = check.execute()
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0].status == "PASS"
|
||||
assert (
|
||||
result[0].status_extended
|
||||
== f"Bedrock guardrail test-guardrail is available in region {AWS_REGION_US_EAST_1}. This does not confirm that the guardrail is attached to agents or used on model invocations."
|
||||
)
|
||||
assert result[0].resource_id == "test-id"
|
||||
assert result[0].resource_arn == GUARDRAIL_ARN
|
||||
assert result[0].region == AWS_REGION_US_EAST_1
|
||||
assert result[0].resource_tags == []
|
||||
|
||||
@mock.patch(
|
||||
"botocore.client.BaseClient._make_api_call",
|
||||
new=mock_make_api_call_guardrails_only_in_us_east_1,
|
||||
)
|
||||
@mock_aws
|
||||
def test_guardrails_in_one_region_only(self):
|
||||
"""Test PASS in the region with a guardrail and FAIL in the region without one."""
|
||||
from prowler.providers.aws.services.bedrock.bedrock_service import Bedrock
|
||||
|
||||
aws_provider = set_mocked_aws_provider(
|
||||
[AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1]
|
||||
)
|
||||
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=aws_provider,
|
||||
),
|
||||
mock.patch(
|
||||
"prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured.bedrock_client",
|
||||
new=Bedrock(aws_provider),
|
||||
),
|
||||
):
|
||||
from prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured import (
|
||||
bedrock_guardrails_configured,
|
||||
)
|
||||
|
||||
check = bedrock_guardrails_configured()
|
||||
result = check.execute()
|
||||
|
||||
assert len(result) == 2
|
||||
|
||||
results_by_region = {r.region: r for r in result}
|
||||
|
||||
eu_result = results_by_region[AWS_REGION_EU_WEST_1]
|
||||
assert eu_result.status == "FAIL"
|
||||
assert (
|
||||
eu_result.status_extended
|
||||
== f"Bedrock has no guardrails configured in region {AWS_REGION_EU_WEST_1}."
|
||||
)
|
||||
assert eu_result.resource_id == "bedrock-guardrails"
|
||||
assert (
|
||||
eu_result.resource_arn
|
||||
== f"arn:aws:bedrock:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:guardrails"
|
||||
)
|
||||
assert eu_result.resource_tags == []
|
||||
|
||||
us_result = results_by_region[AWS_REGION_US_EAST_1]
|
||||
assert us_result.status == "PASS"
|
||||
assert (
|
||||
us_result.status_extended
|
||||
== f"Bedrock guardrail test-guardrail is available in region {AWS_REGION_US_EAST_1}. This does not confirm that the guardrail is attached to agents or used on model invocations."
|
||||
)
|
||||
assert us_result.resource_id == "test-id"
|
||||
assert us_result.resource_arn == GUARDRAIL_ARN
|
||||
assert us_result.resource_tags == []
|
||||
|
||||
@mock.patch(
|
||||
"botocore.client.BaseClient._make_api_call",
|
||||
new=mock_make_api_call_guardrail_validation_exception,
|
||||
)
|
||||
@mock_aws
|
||||
def test_guardrails_unsupported_region_is_skipped(self):
|
||||
"""Test unsupported regions are skipped instead of failing."""
|
||||
from prowler.providers.aws.services.bedrock.bedrock_service import Bedrock
|
||||
|
||||
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
|
||||
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=aws_provider,
|
||||
),
|
||||
mock.patch(
|
||||
"prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured.bedrock_client",
|
||||
new=Bedrock(aws_provider),
|
||||
),
|
||||
):
|
||||
from prowler.providers.aws.services.bedrock.bedrock_guardrails_configured.bedrock_guardrails_configured import (
|
||||
bedrock_guardrails_configured,
|
||||
)
|
||||
|
||||
check = bedrock_guardrails_configured()
|
||||
result = check.execute()
|
||||
|
||||
assert len(result) == 0
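The skip behavior exercised here is presumably handled in the service's listing code roughly as follows (a sketch inferred from the mocked ValidationException above; the helper name is hypothetical, not the verbatim implementation):

from botocore.exceptions import ClientError


def list_region_guardrails(regional_client):
    """Return the region's guardrails, or None when the region is unsupported."""
    try:
        return regional_client.list_guardrails()["guardrails"]
    except ClientError as error:
        if error.response["Error"]["Code"] == "ValidationException":
            # Guardrails are not supported in this region; returning None lets
            # the caller skip the region so no finding is emitted for it.
            return None
        raise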
|
||||
@@ -45,11 +45,12 @@ def mock_make_api_call(self, operation_name, kwarg):
|
||||
elif operation_name == "ListBuildsForProject":
|
||||
return {"ids": [build_id]}
|
||||
elif operation_name == "BatchGetBuilds":
|
||||
return {"builds": [{"endTime": last_invoked_time}]}
|
||||
return {"builds": [{"id": build_id, "endTime": last_invoked_time}]}
|
||||
elif operation_name == "BatchGetProjects":
|
||||
return {
|
||||
"projects": [
|
||||
{
|
||||
"arn": project_arn,
|
||||
"source": {
|
||||
"type": source_type,
|
||||
"location": bitbucket_url,
|
||||
@@ -230,3 +231,97 @@ class Test_Codebuild_Service:
|
||||
assert (
|
||||
codebuild.report_groups[report_group_arn].tags[0]["value"] == project_name
|
||||
)
|
||||
|
||||
|
||||
# Module-level state and helpers used by the chunking/out-of-order test below.
|
||||
# Kept at module level so the API-call mock is a plain function rather than a
|
||||
# closure defined inside the test method.
|
||||
TOTAL_PROJECTS = 150
|
||||
many_project_names = [f"project-{i}" for i in range(TOTAL_PROJECTS)]
|
||||
many_project_arns = [
|
||||
f"arn:{AWS_COMMERCIAL_PARTITION}:codebuild:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:project/{name}"
|
||||
for name in many_project_names
|
||||
]
|
||||
many_build_ids_for = {name: f"{name}:build-id" for name in many_project_names}
|
||||
many_end_times_for = {
|
||||
name: datetime.now() - timedelta(days=i)
|
||||
for i, name in enumerate(many_project_names)
|
||||
}
|
||||
many_name_by_build_id = {v: k for k, v in many_build_ids_for.items()}
|
||||
many_batch_call_sizes = {"BatchGetProjects": [], "BatchGetBuilds": []}
|
||||
|
||||
|
||||
def mock_make_api_call_many_projects(self, operation_name, kwarg):
|
||||
if operation_name == "ListProjects":
|
||||
return {"projects": many_project_names}
|
||||
if operation_name == "ListBuildsForProject":
|
||||
return {"ids": [many_build_ids_for[kwarg["projectName"]]]}
|
||||
if operation_name == "BatchGetBuilds":
|
||||
ids = kwarg["ids"]
|
||||
many_batch_call_sizes["BatchGetBuilds"].append(len(ids))
|
||||
# Reverse the response order to verify id->project mapping does not
|
||||
# depend on response ordering.
|
||||
builds = [
|
||||
{"id": bid, "endTime": many_end_times_for[many_name_by_build_id[bid]]}
|
||||
for bid in reversed(ids)
|
||||
]
|
||||
return {"builds": builds}
|
||||
if operation_name == "BatchGetProjects":
|
||||
names = kwarg["names"]
|
||||
many_batch_call_sizes["BatchGetProjects"].append(len(names))
|
||||
# Reverse the response order to verify arn->project mapping does not
|
||||
# depend on response ordering.
|
||||
projects = [
|
||||
{
|
||||
"arn": f"arn:{AWS_COMMERCIAL_PARTITION}:codebuild:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:project/{name}",
|
||||
"source": {"type": "NO_SOURCE"},
|
||||
"logsConfig": {},
|
||||
"tags": [],
|
||||
"projectVisibility": "PRIVATE",
|
||||
}
|
||||
for name in reversed(names)
|
||||
]
|
||||
return {"projects": projects}
|
||||
if operation_name == "ListReportGroups":
|
||||
return {"reportGroups": []}
|
||||
return make_api_call(self, operation_name, kwarg)
|
||||
|
||||
|
||||
class Test_Codebuild_Service_Batching:
|
||||
@patch(
|
||||
"botocore.client.BaseClient._make_api_call",
|
||||
new=mock_make_api_call_many_projects,
|
||||
)
|
||||
@patch(
|
||||
"prowler.providers.aws.aws_provider.AwsProvider.generate_regional_clients",
|
||||
new=mock_generate_regional_clients,
|
||||
)
|
||||
@mock_aws
|
||||
def test_codebuild_batches_chunks_over_100_projects_and_maps_out_of_order_responses(
|
||||
self,
|
||||
):
|
||||
"""Verify _batch_get_projects/_batch_get_builds chunk in groups of 100
|
||||
and correctly map out-of-order batch responses back to the right
|
||||
project using `arn`/`id`.
|
||||
"""
|
||||
# Reset the per-test recorder (module-level state survives across runs).
|
||||
many_batch_call_sizes["BatchGetProjects"].clear()
|
||||
many_batch_call_sizes["BatchGetBuilds"].clear()
|
||||
|
||||
codebuild = Codebuild(set_mocked_aws_provider([AWS_REGION_EU_WEST_1]))
|
||||
|
||||
# Verify chunking: 150 items -> two batches of 100 and 50.
|
||||
assert sorted(many_batch_call_sizes["BatchGetProjects"]) == [50, 100]
|
||||
assert sorted(many_batch_call_sizes["BatchGetBuilds"]) == [50, 100]
|
||||
|
||||
# Verify all projects were tracked.
|
||||
assert len(codebuild.projects) == TOTAL_PROJECTS
|
||||
|
||||
# Verify out-of-order responses were correctly mapped back to the
|
||||
# right project by `arn` (projects) and `id` (builds).
|
||||
for name, arn in zip(many_project_names, many_project_arns):
|
||||
project = codebuild.projects[arn]
|
||||
assert project.name == name
|
||||
assert project.project_visibility == "PRIVATE"
|
||||
assert project.last_build == Build(id=many_build_ids_for[name])
|
||||
assert project.last_invoked_time == many_end_times_for[name]
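For reference, the chunk-and-remap pattern this test pins down can be sketched generically like this (hypothetical helper names; the real logic lives in the Codebuild service class):

def _chunks(items, size=100):
    # AWS BatchGet* APIs accept at most 100 identifiers per call.
    for index in range(0, len(items), size):
        yield items[index : index + size]


def batch_get_projects_by_arn(client, project_names):
    # Key each response entry by its ARN so out-of-order (or reordered)
    # batch responses are always attributed to the right project.
    projects_by_arn = {}
    for chunk in _chunks(project_names):
        response = client.batch_get_projects(names=list(chunk))
        for project in response["projects"]:
            projects_by_arn[project["arn"]] = project
    return projects_by_arn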