Compare commits


2 Commits

143 changed files with 1633 additions and 8936 deletions
+11 -12
@@ -1,15 +1,14 @@
# SDK
/* @prowler-cloud/detection-remediation
/prowler/ @prowler-cloud/detection-remediation
/prowler/compliance/ @prowler-cloud/compliance
/tests/ @prowler-cloud/detection-remediation
/dashboard/ @prowler-cloud/detection-remediation
/docs/ @prowler-cloud/detection-remediation
/examples/ @prowler-cloud/detection-remediation
/util/ @prowler-cloud/detection-remediation
/contrib/ @prowler-cloud/detection-remediation
/permissions/ @prowler-cloud/detection-remediation
/codecov.yml @prowler-cloud/detection-remediation @prowler-cloud/api
/* @prowler-cloud/sdk
/prowler/ @prowler-cloud/sdk @prowler-cloud/detection-and-remediation
/tests/ @prowler-cloud/sdk @prowler-cloud/detection-and-remediation
/dashboard/ @prowler-cloud/sdk
/docs/ @prowler-cloud/sdk
/examples/ @prowler-cloud/sdk
/util/ @prowler-cloud/sdk
/contrib/ @prowler-cloud/sdk
/permissions/ @prowler-cloud/sdk
/codecov.yml @prowler-cloud/sdk @prowler-cloud/api
# API
/api/ @prowler-cloud/api
@@ -18,7 +17,7 @@
/ui/ @prowler-cloud/ui
# AI
/mcp_server/ @prowler-cloud/detection-remediation
/mcp_server/ @prowler-cloud/ai
# Platform
/.github/ @prowler-cloud/platform
@@ -64,6 +64,19 @@ runs:
echo "Updated resolved_reference:"
grep -A2 -B2 "resolved_reference" poetry.lock
- name: Update SDK resolved_reference to latest commit (prowler repo on push)
if: github.event_name == 'push' && github.ref == 'refs/heads/master' && github.repository == 'prowler-cloud/prowler'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
LATEST_COMMIT=$(curl -s "https://api.github.com/repos/prowler-cloud/prowler/commits/master" | jq -r '.sha')
echo "Latest commit hash: $LATEST_COMMIT"
sed -i '/url = "https:\/\/github\.com\/prowler-cloud\/prowler\.git"/,/resolved_reference = / {
s/resolved_reference = "[a-f0-9]\{40\}"/resolved_reference = "'"$LATEST_COMMIT"'"/
}' poetry.lock
echo "Updated resolved_reference:"
grep -A2 -B2 "resolved_reference" poetry.lock
- name: Update poetry.lock (prowler repo only)
if: github.repository == 'prowler-cloud/prowler' && inputs.update-lock == 'true'
shell: bash
+5 -6
@@ -27,12 +27,11 @@ jobs:
- name: Harden Runner
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
with:
# We can't block as Trufflehog needs to verify secrets against vendors
egress-policy: audit
# allowed-endpoints: >
# github.com:443
# ghcr.io:443
# pkg-containers.githubusercontent.com:443
egress-policy: block
allowed-endpoints: >
github.com:443
ghcr.io:443
pkg-containers.githubusercontent.com:443
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+1 -26
@@ -2,38 +2,12 @@
All notable changes to the **Prowler API** are documented in this file.
## [1.25.3] (Prowler v5.24.3)
### 🐞 Fixed
- Finding groups aggregated `status` now treats muted findings as resolved: a group is `FAIL` only while at least one non-muted FAIL remains, otherwise it is `PASS` (including fully-muted groups). The `filter[status]` filter and the `sort=status` ordering share the same semantics, keeping `status` consistent with `fail_count` and the orthogonal `muted` flag [(#10825)](https://github.com/prowler-cloud/prowler/pull/10825)
---
## [1.25.2] (Prowler v5.24.2)
### 🔄 Changed
- Finding groups `/resources` endpoints now materialize the filtered finding IDs into a Python list before filtering `ResourceFindingMapping`, so PostgreSQL switches from a Merge Semi Join that read hundreds of thousands of RFM index entries to a Nested Loop Index Scan over `finding_id`. The `has_mappings.exists()` pre-check is removed, and a request-scoped cache deduplicates the finding-id round-trip across the helpers that build different RFM querysets [(#10816)](https://github.com/prowler-cloud/prowler/pull/10816)
### 🐞 Fixed
- `/finding-groups/latest/<check_id>/resources` now selects the latest completed scan per provider by `-completed_at` (then `-inserted_at`) instead of `-inserted_at`, matching the `/finding-groups/latest` summary path and the daily-summary upsert so overlapping scans no longer produce diverging `delta`/`new_count` between the two endpoints [(#10802)](https://github.com/prowler-cloud/prowler/pull/10802)
---
## [1.25.1] (Prowler v5.24.1)
### 🔄 Changed
- Attack Paths: Restore `SYNC_BATCH_SIZE` and `FINDINGS_BATCH_SIZE` defaults to 1000, upgrade Cartography to 0.135.0, enable Celery queue priority for cleanup task, rewrite Finding insertion, remove AWS graph cleanup and add timing logs [(#10729)](https://github.com/prowler-cloud/prowler/pull/10729)
### 🐞 Fixed
- Finding group resources endpoints now include findings without associated resources (orphaned IaC findings) as simulated resource rows, and return one row per finding when multiple findings share a resource [(#10708)](https://github.com/prowler-cloud/prowler/pull/10708)
- Attack Paths: Missing `tenant_id` filter while getting related findings after scan completes [(#10722)](https://github.com/prowler-cloud/prowler/pull/10722)
- Finding group counters `pass_count`, `fail_count` and `manual_count` now exclude muted findings [(#10753)](https://github.com/prowler-cloud/prowler/pull/10753)
- Silent data loss in `ResourceFindingMapping` bulk insert that left findings orphaned when `INSERT ... ON CONFLICT DO NOTHING` dropped rows without raising; added explicit `unique_fields` [(#10724)](https://github.com/prowler-cloud/prowler/pull/10724)
---
@@ -48,6 +22,7 @@ All notable changes to the **Prowler API** are documented in this file.
- Worker-beat race condition on cold start: replaced `sleep 15` with API service healthcheck dependency (Docker Compose) and init containers (Helm), aligned Gunicorn default port to `8080` [(#10603)](https://github.com/prowler-cloud/prowler/pull/10603)
- API container startup crash on Linux due to root-owned bind-mount preventing JWT key generation [(#10646)](https://github.com/prowler-cloud/prowler/pull/10646)
- Finding group resources endpoints now include findings without associated resources (orphan IaC findings) as simulated resource rows, and return one row per finding when multiple findings share a resource [(#10708)](https://github.com/prowler-cloud/prowler/pull/10708)
### 🔐 Security
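For reference, the two aggregation rules that appear in this compare (the muted-as-resolved rule described by the 1.25.3 entry in the hunk above, and the totals-based rule visible in the FindingGroupViewSet hunk further down) can be sketched in plain Python. The counter names come from the API payload; the function names are illustrative only and not part of the codebase:

def status_muted_as_resolved(row):
    # 1.25.3 rule: only non-muted FAIL findings keep a group failing.
    if row.get("fail_count", 0) > 0:
        return "FAIL"
    if (
        row.get("pass_count", 0) > 0
        or row.get("pass_muted_count", 0) > 0
        or row.get("fail_muted_count", 0) > 0
    ):
        return "PASS"
    return "MANUAL"

def status_from_totals(row):
    # Totals rule: muted findings still count toward FAIL/PASS.
    total_fail = row.get("fail_count", 0) + row.get("fail_muted_count", 0)
    total_pass = row.get("pass_count", 0) + row.get("pass_muted_count", 0)
    if total_fail > 0:
        return "FAIL"
    if total_pass > 0:
        return "PASS"
    return "MANUAL"

Under the first rule a fully-muted failing group reports PASS; under the second it reports FAIL, with the orthogonal muted flag signalling that it is not actionable.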
+127 -151
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.3.4 and should not be changed by hand.
[[package]]
name = "about-time"
@@ -682,21 +682,21 @@ requests = ">=2.21.0,<3.0.0"
[[package]]
name = "alibabacloud-tea-openapi"
version = "0.4.4"
version = "0.4.1"
description = "Alibaba Cloud openapi SDK Library for Python"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "alibabacloud_tea_openapi-0.4.4-py3-none-any.whl", hash = "sha256:cea6bc1fe35b0319a8752cb99eb0ecb0dab7ca1a71b99c12970ba0867410995f"},
{file = "alibabacloud_tea_openapi-0.4.4.tar.gz", hash = "sha256:1b0917bc03cd49417da64945e92731716d53e2eb8707b235f54e45b7473221ce"},
{file = "alibabacloud_tea_openapi-0.4.1-py3-none-any.whl", hash = "sha256:e46bfa3ca34086d2c357d217a0b7284ecbd4b3bab5c88e075e73aec637b0e4a0"},
{file = "alibabacloud_tea_openapi-0.4.1.tar.gz", hash = "sha256:2384b090870fdb089c3c40f3fb8cf0145b8c7d6c14abbac521f86a01abb5edaf"},
]
[package.dependencies]
alibabacloud-credentials = ">=1.0.2,<2.0.0"
alibabacloud-gateway-spi = ">=0.0.2,<1.0.0"
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
cryptography = {version = ">=3.0.0,<47.0.0", markers = "python_version >= \"3.8\""}
cryptography = ">=3.0.0,<45.0.0"
darabonba-core = ">=1.0.3,<2.0.0"
[[package]]
@@ -1526,19 +1526,19 @@ typing-extensions = ">=4.6.0"
[[package]]
name = "azure-mgmt-resource"
version = "24.0.0"
version = "23.3.0"
description = "Microsoft Azure Resource Management Client Library for Python"
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "azure_mgmt_resource-24.0.0-py3-none-any.whl", hash = "sha256:27b32cd223e2784269f5a0db3c282042886ee4072d79cedc638438ece7cd0df4"},
{file = "azure_mgmt_resource-24.0.0.tar.gz", hash = "sha256:cf6b8995fcdd407ac9ff1dd474087129429a1d90dbb1ac77f97c19b96237b265"},
{file = "azure_mgmt_resource-23.3.0-py3-none-any.whl", hash = "sha256:ab216ee28e29db6654b989746e0c85a1181f66653929d2cb6e48fba66d9af323"},
{file = "azure_mgmt_resource-23.3.0.tar.gz", hash = "sha256:fc4f1fd8b6aad23f8af4ed1f913df5f5c92df117449dc354fea6802a2829fea4"},
]
[package.dependencies]
azure-common = ">=1.1"
azure-mgmt-core = ">=1.5.0"
azure-mgmt-core = ">=1.3.2"
isodate = ">=0.6.1"
typing-extensions = ">=4.6.0"
@@ -1822,19 +1822,19 @@ crt = ["awscrt (==0.27.6)"]
[[package]]
name = "cartography"
version = "0.135.0"
version = "0.132.0"
description = "Explore assets and their relationships across your technical infrastructure."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "cartography-0.135.0-py3-none-any.whl", hash = "sha256:c62c32a6917b8f23a8b98fe2b6c7c4a918b50f55918482966c4dae1cf5f538e1"},
{file = "cartography-0.135.0.tar.gz", hash = "sha256:3f500cd22c3b392d00e8b49f62acc95fd4dcd559ce514aafe2eb8101133c7a49"},
{file = "cartography-0.132.0-py3-none-any.whl", hash = "sha256:c070aa51d0ab4479cb043cae70b35e7df49f2fb5f1fa95ccf10000bbeb952262"},
{file = "cartography-0.132.0.tar.gz", hash = "sha256:7c6332bc57fd2629d7b83aee7bd95a7b2edb0d51ef746efa0461399e0b66625c"},
]
[package.dependencies]
adal = ">=1.2.4"
aioboto3 = ">=15.0.0"
aioboto3 = ">=13.0.0"
azure-cli-core = ">=2.26.0"
azure-identity = ">=1.5.0"
azure-keyvault-certificates = ">=4.0.0"
@@ -1852,9 +1852,9 @@ azure-mgmt-keyvault = ">=10.0.0"
azure-mgmt-logic = ">=10.0.0"
azure-mgmt-monitor = ">=3.0.0"
azure-mgmt-network = ">=25.0.0"
azure-mgmt-resource = ">=24.0.0,<25"
azure-mgmt-resource = ">=10.2.0,<25.0.0"
azure-mgmt-security = ">=5.0.0"
azure-mgmt-sql = ">=3.0.1"
azure-mgmt-sql = ">=3.0.1,<4"
azure-mgmt-storage = ">=16.0.0"
azure-mgmt-synapse = ">=2.0.0"
azure-mgmt-web = ">=7.0.0"
@@ -1862,39 +1862,38 @@ azure-synapse-artifacts = ">=0.17.0"
backoff = ">=2.1.2"
boto3 = ">=1.15.1"
botocore = ">=1.18.1"
cloudflare = ">=4.1.0"
cloudflare = ">=4.1.0,<5.0.0"
crowdstrike-falconpy = ">=0.5.1"
cryptography = ">=45.0.0"
dnspython = ">=2.0.0"
duo-client = ">=5.5.0"
google-api-python-client = ">=2.0.0"
cryptography = "*"
dnspython = ">=1.15.0"
duo-client = "*"
google-api-python-client = ">=1.7.8"
google-auth = ">=2.37.0"
google-cloud-asset = ">=1.0.0"
google-cloud-resource-manager = ">=1.14.2"
httpx = ">=0.24.0"
kubernetes = ">=22.6.0"
marshmallow = ">=4.0.0"
msgraph-sdk = ">=1.53.0"
marshmallow = ">=3.0.0rc7"
msgraph-sdk = "*"
msrestazure = ">=0.6.4"
neo4j = ">=6.0.0"
oci = ">=2.71.0"
okta = "<1.0.0"
packageurl-python = ">=0.17.0"
packaging = ">=26.0.0"
packageurl-python = "*"
packaging = "*"
pagerduty = ">=4.0.1"
policyuniverse = ">=1.1.0.0"
PyJWT = {version = ">=2.0.0", extras = ["crypto"]}
python-dateutil = ">=2.9.0"
python-dateutil = "*"
python-digitalocean = ">=1.16.0"
pyyaml = ">=5.3.1"
requests = ">=2.22.0"
scaleway = ">=2.10.0"
slack-sdk = ">=3.37.0"
statsd = ">=4.0.0"
statsd = "*"
typer = ">=0.9.0"
types-aiobotocore-ecr = ">=3.1.0"
workos = ">=5.44.0"
xmltodict = ">=1.0.0"
types-aiobotocore-ecr = "*"
xmltodict = "*"
[[package]]
name = "celery"
@@ -2504,74 +2503,62 @@ dev = ["bandit", "coverage", "flake8", "pydocstyle", "pylint", "pytest", "pytest
[[package]]
name = "cryptography"
version = "46.0.6"
version = "44.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
python-versions = "!=3.9.0,!=3.9.1,>=3.7"
groups = ["main", "dev"]
files = [
{file = "cryptography-46.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:64235194bad039a10bb6d2d930ab3323baaec67e2ce36215fd0952fad0930ca8"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:26031f1e5ca62fcb9d1fcb34b2b60b390d1aacaa15dc8b895a9ed00968b97b30"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9a693028b9cbe51b5a1136232ee8f2bc242e4e19d456ded3fa7c86e43c713b4a"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:67177e8a9f421aa2d3a170c3e56eca4e0128883cf52a071a7cbf53297f18b175"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:d9528b535a6c4f8ff37847144b8986a9a143585f0540fbcb1a98115b543aa463"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:22259338084d6ae497a19bae5d4c66b7ca1387d3264d1c2c0e72d9e9b6a77b97"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:760997a4b950ff00d418398ad73fbc91aa2894b5c1db7ccb45b4f68b42a63b3c"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3dfa6567f2e9e4c5dceb8ccb5a708158a2a871052fa75c8b78cb0977063f1507"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:cdcd3edcbc5d55757e5f5f3d330dd00007ae463a7e7aa5bf132d1f22a4b62b19"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:d4e4aadb7fc1f88687f47ca20bb7227981b03afaae69287029da08096853b738"},
{file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2b417edbe8877cda9022dde3a008e2deb50be9c407eef034aeeb3a8b11d9db3c"},
{file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:380343e0653b1c9d7e1f55b52aaa2dbb2fdf2730088d48c43ca1c7c0abb7cc2f"},
{file = "cryptography-46.0.6-cp311-abi3-win32.whl", hash = "sha256:bcb87663e1f7b075e48c3be3ecb5f0b46c8fc50b50a97cf264e7f60242dca3f2"},
{file = "cryptography-46.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:6739d56300662c468fddb0e5e291f9b4d084bead381667b9e654c7dd81705124"},
{file = "cryptography-46.0.6-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:2ef9e69886cbb137c2aef9772c2e7138dc581fad4fcbcf13cc181eb5a3ab6275"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7f417f034f91dcec1cb6c5c35b07cdbb2ef262557f701b4ecd803ee8cefed4f4"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d24c13369e856b94892a89ddf70b332e0b70ad4a5c43cf3e9cb71d6d7ffa1f7b"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:aad75154a7ac9039936d50cf431719a2f8d4ed3d3c277ac03f3339ded1a5e707"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3c21d92ed15e9cfc6eb64c1f5a0326db22ca9c2566ca46d845119b45b4400361"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:4668298aef7cddeaf5c6ecc244c2302a2b8e40f384255505c22875eebb47888b"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8ce35b77aaf02f3b59c90b2c8a05c73bac12cea5b4e8f3fbece1f5fddea5f0ca"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c89eb37fae9216985d8734c1afd172ba4927f5a05cfd9bf0e4863c6d5465b013"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:ed418c37d095aeddf5336898a132fba01091f0ac5844e3e8018506f014b6d2c4"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:69cf0056d6947edc6e6760e5f17afe4bea06b56a9ac8a06de9d2bd6b532d4f3a"},
{file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e7304c4f4e9490e11efe56af6713983460ee0780f16c63f219984dab3af9d2d"},
{file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b928a3ca837c77a10e81a814a693f2295200adb3352395fad024559b7be7a736"},
{file = "cryptography-46.0.6-cp314-cp314t-win32.whl", hash = "sha256:97c8115b27e19e592a05c45d0dd89c57f81f841cc9880e353e0d3bf25b2139ed"},
{file = "cryptography-46.0.6-cp314-cp314t-win_amd64.whl", hash = "sha256:c797e2517cb7880f8297e2c0f43bb910e91381339336f75d2c1c2cbf811b70b4"},
{file = "cryptography-46.0.6-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:12cae594e9473bca1a7aceb90536060643128bb274fcea0fc459ab90f7d1ae7a"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:639301950939d844a9e1c4464d7e07f902fe9a7f6b215bb0d4f28584729935d8"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ed3775295fb91f70b4027aeba878d79b3e55c0b3e97eaa4de71f8f23a9f2eb77"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8927ccfbe967c7df312ade694f987e7e9e22b2425976ddbf28271d7e58845290"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:b12c6b1e1651e42ab5de8b1e00dc3b6354fdfd778e7fa60541ddacc27cd21410"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:063b67749f338ca9c5a0b7fe438a52c25f9526b851e24e6c9310e7195aad3b4d"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:02fad249cb0e090b574e30b276a3da6a149e04ee2f049725b1f69e7b8351ec70"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7e6142674f2a9291463e5e150090b95a8519b2fb6e6aaec8917dd8d094ce750d"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:456b3215172aeefb9284550b162801d62f5f264a081049a3e94307fe20792cfa"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:341359d6c9e68834e204ceaf25936dffeafea3829ab80e9503860dcc4f4dac58"},
{file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a9c42a2723999a710445bc0d974e345c32adfd8d2fac6d8a251fa829ad31cfb"},
{file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6617f67b1606dfd9fe4dbfa354a9508d4a6d37afe30306fe6c101b7ce3274b72"},
{file = "cryptography-46.0.6-cp38-abi3-win32.whl", hash = "sha256:7f6690b6c55e9c5332c0b59b9c8a3fb232ebf059094c17f9019a51e9827df91c"},
{file = "cryptography-46.0.6-cp38-abi3-win_amd64.whl", hash = "sha256:79e865c642cfc5c0b3eb12af83c35c5aeff4fa5c672dc28c43721c2c9fdd2f0f"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:2ea0f37e9a9cf0df2952893ad145fd9627d326a59daec9b0802480fa3bcd2ead"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a3e84d5ec9ba01f8fd03802b2147ba77f0c8f2617b2aff254cedd551844209c8"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:12f0fa16cc247b13c43d56d7b35287ff1569b5b1f4c5e87e92cc4fcc00cd10c0"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:50575a76e2951fe7dbd1f56d181f8c5ceeeb075e9ff88e7ad997d2f42af06e7b"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:90e5f0a7b3be5f40c3a0a0eafb32c681d8d2c181fc2a1bdabe9b3f611d9f6b1a"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6728c49e3b2c180ef26f8e9f0a883a2c585638db64cf265b49c9ba10652d430e"},
{file = "cryptography-46.0.6.tar.gz", hash = "sha256:27550628a518c5c6c903d84f637fbecf287f6cb9ced3804838a1295dc1fd0759"},
{file = "cryptography-44.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:962bc30480a08d133e631e8dfd4783ab71cc9e33d5d7c1e192f0b7c06397bb88"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffc61e8f3bf5b60346d89cd3d37231019c17a081208dfbbd6e1605ba03fa137"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58968d331425a6f9eedcee087f77fd3c927c88f55368f43ff7e0a19891f2642c"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:e28d62e59a4dbd1d22e747f57d4f00c459af22181f0b2f787ea83f5a876d7c76"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af653022a0c25ef2e3ffb2c673a50e5a0d02fecc41608f4954176f1933b12359"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:157f1f3b8d941c2bd8f3ffee0af9b049c9665c39d3da9db2dc338feca5e98a43"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:c6cd67722619e4d55fdb42ead64ed8843d64638e9c07f4011163e46bc512cf01"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b424563394c369a804ecbee9b06dfb34997f19d00b3518e39f83a5642618397d"},
{file = "cryptography-44.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c91fc8e8fd78af553f98bc7f2a1d8db977334e4eea302a4bfd75b9461c2d8904"},
{file = "cryptography-44.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:25cd194c39fa5a0aa4169125ee27d1172097857b27109a45fadc59653ec06f44"},
{file = "cryptography-44.0.3-cp37-abi3-win32.whl", hash = "sha256:3be3f649d91cb182c3a6bd336de8b61a0a71965bd13d1a04a0e15b39c3d5809d"},
{file = "cryptography-44.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:3883076d5c4cc56dbef0b898a74eb6992fdac29a7b9013870b34efe4ddb39a0d"},
{file = "cryptography-44.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:5639c2b16764c6f76eedf722dbad9a0914960d3489c0cc38694ddf9464f1bb2f"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ffef566ac88f75967d7abd852ed5f182da252d23fac11b4766da3957766759"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:192ed30fac1728f7587c6f4613c29c584abdc565d7417c13904708db10206645"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7d5fe7195c27c32a64955740b949070f21cba664604291c298518d2e255931d2"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3f07943aa4d7dad689e3bb1638ddc4944cc5e0921e3c227486daae0e31a05e54"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb90f60e03d563ca2445099edf605c16ed1d5b15182d21831f58460c48bffb93"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:ab0b005721cc0039e885ac3503825661bd9810b15d4f374e473f8c89b7d5460c"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3bb0847e6363c037df8f6ede57d88eaf3410ca2267fb12275370a76f85786a6f"},
{file = "cryptography-44.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b0cc66c74c797e1db750aaa842ad5b8b78e14805a9b5d1348dc603612d3e3ff5"},
{file = "cryptography-44.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6866df152b581f9429020320e5eb9794c8780e90f7ccb021940d7f50ee00ae0b"},
{file = "cryptography-44.0.3-cp39-abi3-win32.whl", hash = "sha256:c138abae3a12a94c75c10499f1cbae81294a6f983b3af066390adee73f433028"},
{file = "cryptography-44.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:5d186f32e52e66994dce4f766884bcb9c68b8da62d61d9d215bfe5fb56d21334"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:cad399780053fb383dc067475135e41c9fe7d901a97dd5d9c5dfb5611afc0d7d"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:21a83f6f35b9cc656d71b5de8d519f566df01e660ac2578805ab245ffd8523f8"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fc3c9babc1e1faefd62704bb46a69f359a9819eb0292e40df3fb6e3574715cd4"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:e909df4053064a97f1e6565153ff8bb389af12c5c8d29c343308760890560aff"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dad80b45c22e05b259e33ddd458e9e2ba099c86ccf4e88db7bbab4b747b18d06"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:479d92908277bed6e1a1c69b277734a7771c2b78633c224445b5c60a9f4bc1d9"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:896530bc9107b226f265effa7ef3f21270f18a2026bc09fed1ebd7b66ddf6375"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9b4d4a5dbee05a2c390bf212e78b99434efec37b17a4bff42f50285c5c8c9647"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02f55fb4f8b79c1221b0961488eaae21015b69b210e18c386b69de182ebb1259"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:dd3db61b8fe5be220eee484a17233287d0be6932d056cf5738225b9c05ef4fff"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:978631ec51a6bbc0b7e58f23b68a8ce9e5f09721940933e9c217068388789fe5"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:5d20cc348cca3a8aa7312f42ab953a56e15323800ca3ab0706b8cd452a3a056c"},
{file = "cryptography-44.0.3.tar.gz", hash = "sha256:fe19d8bc5536a91a24a8133328880a41831b6c5df54599a8417b62fe015d3053"},
]
[package.dependencies]
cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""}
cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""}
[package.extras]
docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"]
docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=3.0.0) ; python_version >= \"3.8\""]
docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
nox = ["nox[uv] (>=2024.4.15)"]
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_version >= \"3.8\""]
pep8test = ["check-sdist ; python_version >= \"3.8\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"]
sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.6)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test = ["certifi (>=2024)", "cryptography-vectors (==44.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"]
[[package]]
@@ -2974,7 +2961,7 @@ files = [
[package.dependencies]
autopep8 = "*"
Django = ">=4.2"
gprof2dot = ">=2017.09.19"
gprof2dot = ">=2017.9.19"
sqlparse = "*"
[[package]]
@@ -3753,19 +3740,19 @@ urllib3 = ["packaging", "urllib3"]
[[package]]
name = "google-auth-httplib2"
version = "0.2.0"
version = "0.2.1"
description = "Google Authentication Library: httplib2 transport"
optional = false
python-versions = "*"
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"},
{file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"},
{file = "google_auth_httplib2-0.2.1-py3-none-any.whl", hash = "sha256:1be94c611db91c01f9703e7f62b0a59bbd5587a95571c7b6fade510d648bc08b"},
{file = "google_auth_httplib2-0.2.1.tar.gz", hash = "sha256:5ef03be3927423c87fb69607b42df23a444e434ddb2555b73b3679793187b7de"},
]
[package.dependencies]
google-auth = "*"
httplib2 = ">=0.19.0"
google-auth = ">=1.32.0,<3.0.0"
httplib2 = ">=0.19.0,<1.0.0"
[[package]]
name = "google-cloud-access-context-manager"
@@ -4582,7 +4569,7 @@ files = [
[package.dependencies]
attrs = ">=22.2.0"
jsonschema-specifications = ">=2023.03.6"
jsonschema-specifications = ">=2023.3.6"
referencing = ">=0.28.4"
rpds-py = ">=0.7.1"
@@ -4790,7 +4777,7 @@ librabbitmq = ["librabbitmq (>=2.0.0) ; python_version < \"3.11\""]
mongodb = ["pymongo (==4.15.3)"]
msgpack = ["msgpack (==1.1.2)"]
pyro = ["pyro4 (==4.82)"]
qpid = ["qpid-python (==1.36.0-1)", "qpid-tools (==1.36.0-1)"]
qpid = ["qpid-python (==1.36.0.post1)", "qpid-tools (==1.36.0.post1)"]
redis = ["redis (>=4.5.2,!=4.5.5,!=5.0.2,<6.5)"]
slmq = ["softlayer_messaging (>=1.0.3)"]
sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"]
@@ -4811,7 +4798,7 @@ files = [
]
[package.dependencies]
certifi = ">=14.05.14"
certifi = ">=14.5.14"
durationpy = ">=0.7"
google-auth = ">=1.0.1"
oauthlib = ">=3.2.2"
@@ -5194,16 +5181,24 @@ files = [
[[package]]
name = "marshmallow"
version = "4.3.0"
version = "3.26.2"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.10"
python-versions = ">=3.9"
groups = ["main", "dev"]
files = [
{file = "marshmallow-4.3.0-py3-none-any.whl", hash = "sha256:46c4fe6984707e3cbd485dfebbf0a59874f58d695aad05c1668d15e8c6e13b46"},
{file = "marshmallow-4.3.0.tar.gz", hash = "sha256:fb43c53b3fe240b8f6af37223d6ef1636f927ad9bea8ab323afad95dff090880"},
{file = "marshmallow-3.26.2-py3-none-any.whl", hash = "sha256:013fa8a3c4c276c24d26d84ce934dc964e2aa794345a0f8c7e5a7191482c8a73"},
{file = "marshmallow-3.26.2.tar.gz", hash = "sha256:bbe2adb5a03e6e3571b573f42527c6fe926e17467833660bebd11593ab8dfd57"},
]
[package.dependencies]
packaging = ">=17.0"
[package.extras]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.1.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "sphinxext-opengraph (==0.9.1)"]
tests = ["pytest", "simplejson"]
[[package]]
name = "matplotlib"
version = "3.10.8"
@@ -5497,14 +5492,14 @@ dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"]
[[package]]
name = "msgraph-sdk"
version = "1.55.0"
version = "1.23.0"
description = "The Microsoft Graph Python SDK"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "msgraph_sdk-1.55.0-py3-none-any.whl", hash = "sha256:c8e68ebc4b88af5111de312e7fa910a4e76ddf48a4534feadb1fb8a411c48cfc"},
{file = "msgraph_sdk-1.55.0.tar.gz", hash = "sha256:6df691a31954a050d26b8a678968017e157d940fb377f2a8a4e17a9741b98756"},
{file = "msgraph_sdk-1.23.0-py3-none-any.whl", hash = "sha256:58e0047b4ca59fd82022c02cd73fec0170a3d84f3b76721e3db2a0314df9a58a"},
{file = "msgraph_sdk-1.23.0.tar.gz", hash = "sha256:6dd1ba9a46f5f0ce8599fd9610133adbd9d1493941438b5d3632fce9e55ed607"},
]
[package.dependencies]
@@ -5930,24 +5925,23 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "oci"
version = "2.169.0"
version = "2.160.3"
description = "Oracle Cloud Infrastructure Python SDK"
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "oci-2.169.0-py3-none-any.whl", hash = "sha256:c71bb5143f307791082b3e33cc1545c2490a518cfed85ab1948ef5107c36d30b"},
{file = "oci-2.169.0.tar.gz", hash = "sha256:f3c5fff00b01783b5325ea7b13bf140053ec1e9f41da20bfb9c8a349ee7662fa"},
{file = "oci-2.160.3-py3-none-any.whl", hash = "sha256:858bff3e697098bdda44833d2476bfb4632126f0182178e7dbde4dbd156d71f0"},
{file = "oci-2.160.3.tar.gz", hash = "sha256:57514889be3b713a8385d86e3ba8a33cf46e3563c2a7e29a93027fb30b8a2537"},
]
[package.dependencies]
certifi = "*"
circuitbreaker = {version = ">=1.3.1,<3.0.0", markers = "python_version >= \"3.7\""}
cryptography = ">=3.2.1,<47.0.0"
pyOpenSSL = ">=17.5.0,<27.0.0"
cryptography = ">=3.2.1,<46.0.0"
pyOpenSSL = ">=17.5.0,<25.0.0"
python-dateutil = ">=2.5.3,<3.0.0"
pytz = ">=2016.10"
urllib3 = {version = ">=2.6.3", markers = "python_version >= \"3.10.0\""}
[package.extras]
adk = ["docstring-parser (>=0.16) ; python_version >= \"3.10\" and python_version < \"4\"", "mcp (>=1.6.0) ; python_version >= \"3.10\" and python_version < \"4\"", "pydantic (>=2.10.6) ; python_version >= \"3.10\" and python_version < \"4\"", "rich (>=13.9.4) ; python_version >= \"3.10\" and python_version < \"4\""]
@@ -6665,7 +6659,7 @@ files = [
[[package]]
name = "prowler"
version = "5.25.0"
version = "5.23.0"
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
optional = false
python-versions = ">=3.10,<3.13"
@@ -6685,7 +6679,7 @@ alibabacloud-rds20140815 = "12.0.0"
alibabacloud_sas20181203 = "6.1.0"
alibabacloud-sls20201230 = "5.9.0"
alibabacloud_sts20150401 = "1.1.6"
alibabacloud_tea_openapi = "0.4.4"
alibabacloud_tea_openapi = "0.4.1"
alibabacloud_vpc20160428 = "6.13.0"
alive-progress = "3.3.0"
awsipranges = "0.3.3"
@@ -6707,7 +6701,7 @@ azure-mgmt-postgresqlflexibleservers = "1.1.0"
azure-mgmt-rdbms = "10.1.0"
azure-mgmt-recoveryservices = "3.1.0"
azure-mgmt-recoveryservicesbackup = "9.2.0"
azure-mgmt-resource = "24.0.0"
azure-mgmt-resource = "23.3.0"
azure-mgmt-search = "9.1.0"
azure-mgmt-security = "7.0.0"
azure-mgmt-sql = "3.0.1"
@@ -6720,29 +6714,29 @@ boto3 = "1.40.61"
botocore = "1.40.61"
cloudflare = "4.3.1"
colorama = "0.4.6"
cryptography = "46.0.6"
cryptography = "44.0.3"
dash = "3.1.1"
dash-bootstrap-components = "2.0.3"
defusedxml = "0.7.1"
defusedxml = ">=0.7.1"
detect-secrets = "1.5.0"
dulwich = "0.23.0"
google-api-python-client = "2.163.0"
google-auth-httplib2 = "0.2.0"
google-auth-httplib2 = ">=0.1,<0.3"
h2 = "4.3.0"
jsonschema = "4.23.0"
kubernetes = "32.0.1"
markdown = "3.10.2"
microsoft-kiota-abstractions = "1.9.2"
msgraph-sdk = "1.55.0"
msgraph-sdk = "1.23.0"
numpy = "2.0.2"
oci = "2.169.0"
oci = "2.160.3"
openstacksdk = "4.2.0"
pandas = "2.2.3"
py-iam-expand = "0.1.0"
py-ocsf-models = "0.8.1"
pydantic = "2.12.5"
pydantic = ">=2.0,<3.0"
pygithub = "2.8.0"
python-dateutil = "2.9.0.post0"
python-dateutil = ">=2.9.0.post0,<3.0.0"
pytz = "2025.1"
schema = "0.7.5"
shodan = "1.31.0"
@@ -6755,7 +6749,7 @@ uuid6 = "2024.7.10"
type = "git"
url = "https://github.com/prowler-cloud/prowler.git"
reference = "master"
resolved_reference = "ca29e354b622198ff6a70e2ea5eb04e4a44a0903"
resolved_reference = "6ac90eb1b58590b6f2f51645dbef17b9231053f4"
[[package]]
name = "psutil"
@@ -6920,14 +6914,14 @@ pydantic = ">=2.12.0,<3.0.0"
[[package]]
name = "pyasn1"
version = "0.6.3"
version = "0.6.2"
description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pyasn1-0.6.3-py3-none-any.whl", hash = "sha256:a80184d120f0864a52a073acc6fc642847d0be408e7c7252f31390c0f4eadcde"},
{file = "pyasn1-0.6.3.tar.gz", hash = "sha256:697a8ecd6d98891189184ca1fa05d1bb00e2f84b5977c481452050549c8a72cf"},
{file = "pyasn1-0.6.2-py3-none-any.whl", hash = "sha256:1eb26d860996a18e9b6ed05e7aae0e9fc21619fcee6af91cca9bad4fbea224bf"},
{file = "pyasn1-0.6.2.tar.gz", hash = "sha256:9b59a2b25ba7e4f8197db7686c09fb33e658b98339fadb826e9512629017833b"},
]
[[package]]
@@ -6964,11 +6958,11 @@ description = "C parser in Python"
optional = false
python-versions = ">=3.10"
groups = ["main", "dev"]
markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\""
files = [
{file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"},
{file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"},
]
markers = {main = "implementation_name != \"PyPy\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\""}
[[package]]
name = "pydantic"
@@ -7194,7 +7188,7 @@ files = [
]
[package.dependencies]
astroid = ">=3.2.2,<=3.3.0-dev0"
astroid = ">=3.2.2,<=3.3.0.dev0"
colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
dill = [
{version = ">=0.3.7", markers = "python_version >= \"3.12\""},
@@ -7294,19 +7288,18 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=7.4.0)", "pytest-cov (>=2.10.1)", "
[[package]]
name = "pyopenssl"
version = "26.0.0"
version = "24.3.0"
description = "Python wrapper module around the OpenSSL library"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "pyopenssl-26.0.0-py3-none-any.whl", hash = "sha256:df94d28498848b98cc1c0ffb8ef1e71e40210d3b0a8064c9d29571ed2904bf81"},
{file = "pyopenssl-26.0.0.tar.gz", hash = "sha256:f293934e52936f2e3413b89c6ce36df66a0b34ae1ea3a053b8c5020ff2f513fc"},
{file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"},
{file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"},
]
[package.dependencies]
cryptography = ">=46.0.0,<47"
typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}
cryptography = ">=41.0.5,<45"
[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
@@ -8209,10 +8202,10 @@ files = [
]
[package.dependencies]
botocore = ">=1.37.4,<2.0a.0"
botocore = ">=1.37.4,<2.0a0"
[package.extras]
crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"]
crt = ["botocore[crt] (>=1.37.4,<2.0a0)"]
[[package]]
name = "safety"
@@ -8814,23 +8807,6 @@ markupsafe = ">=2.1.1"
[package.extras]
watchdog = ["watchdog (>=2.3)"]
[[package]]
name = "workos"
version = "6.0.4"
description = "WorkOS Python Client"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "workos-6.0.4-py3-none-any.whl", hash = "sha256:548668b3702673536f853ba72a7b5bbbc269e467aaf9ac4f477b6e0177df5e21"},
{file = "workos-6.0.4.tar.gz", hash = "sha256:b0bfe8fd212b8567422c4ea3732eb33608794033eb3a69900c6b04db183c32d6"},
]
[package.dependencies]
cryptography = ">=46.0,<47.0"
httpx = ">=0.28,<1.0"
pyjwt = ">=2.12,<3.0"
[[package]]
name = "wrapt"
version = "1.17.3"
@@ -9424,4 +9400,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "a3ab982d11a87d951ff15694d2ca7fd51f1f51a451abb0baa067ccf6966367a8"
content-hash = "077e89853cfe3a6d934841488cfa5a98ff6c92b71f74b817b71387d11559f143"
+2 -1
@@ -38,7 +38,7 @@ dependencies = [
"matplotlib (==3.10.8)",
"reportlab (==4.4.10)",
"neo4j (==6.1.0)",
"cartography (==0.135.0)",
"cartography (==0.132.0)",
"gevent (==25.9.1)",
"werkzeug (==3.1.7)",
"sqlparse (==0.5.5)",
@@ -62,6 +62,7 @@ django-silk = "5.3.2"
docker = "7.1.0"
filelock = "3.20.3"
freezegun = "1.5.1"
marshmallow = "==3.26.2"
mypy = "1.10.1"
pylint = "3.2.5"
pytest = "9.0.3"
@@ -1,23 +0,0 @@
from django.db import migrations
TASK_NAME = "attack-paths-cleanup-stale-scans"
def set_cleanup_priority(apps, schema_editor):
PeriodicTask = apps.get_model("django_celery_beat", "PeriodicTask")
PeriodicTask.objects.filter(name=TASK_NAME).update(priority=0)
def unset_cleanup_priority(apps, schema_editor):
PeriodicTask = apps.get_model("django_celery_beat", "PeriodicTask")
PeriodicTask.objects.filter(name=TASK_NAME).update(priority=None)
class Migration(migrations.Migration):
dependencies = [
("api", "0089_backfill_finding_group_status_muted"),
]
operations = [
migrations.RunPython(set_cleanup_priority, unset_cleanup_priority),
]
+7 -192
@@ -15446,15 +15446,15 @@ class TestFindingGroupViewSet:
# iam_password_policy has only PASS findings
assert data[0]["attributes"]["status"] == "PASS"
def test_finding_groups_fully_muted_group_is_pass(
def test_finding_groups_fully_muted_group_reflects_underlying_status(
self, authenticated_client, finding_groups_fixture
):
"""A fully-muted group reports status=PASS and muted=True.
"""A fully-muted group still surfaces its underlying status (no MUTED).
rds_encryption has 2 muted FAIL findings. Muted findings are treated
as resolved/accepted, so the group is no longer actionable and its
status must be PASS. The `muted` flag is True because every finding
in the group is muted.
rds_encryption has 2 muted FAIL findings, so the group must report
status=FAIL (the orthogonal `muted` boolean signals it isn't actionable).
The status×muted breakdown lets clients answer 'how many failing
findings are muted in this group'.
"""
response = authenticated_client.get(
reverse("finding-group-list"),
@@ -15464,7 +15464,7 @@ class TestFindingGroupViewSet:
data = response.json()["data"]
assert len(data) == 1
attrs = data[0]["attributes"]
assert attrs["status"] == "PASS"
assert attrs["status"] == "FAIL"
assert attrs["muted"] is True
assert attrs["fail_count"] == 0
assert attrs["fail_muted_count"] == 2
@@ -15479,83 +15479,6 @@ class TestFindingGroupViewSet:
== attrs["muted_count"]
)
def test_finding_groups_status_ignores_muted_failures(
self,
authenticated_client,
tenants_fixture,
scans_fixture,
resources_fixture,
):
"""Muted FAIL findings must not drive the aggregated status.
When a group mixes one non-muted PASS with one muted FAIL, the
actionable outcome is PASS: there are no unmuted failures left. The
aggregated `status` must reflect that (not FAIL), while `muted`
stays False because the group still has a non-muted finding.
"""
tenant = tenants_fixture[0]
scan1, *_ = scans_fixture
resource1, *_ = resources_fixture
pass_finding = Finding.objects.create(
tenant_id=tenant.id,
uid="fg_mixed_muted_pass",
scan=scan1,
delta=None,
status=Status.PASS,
severity=Severity.low,
impact=Severity.low,
check_id="mixed_muted_check",
check_metadata={
"CheckId": "mixed_muted_check",
"checktitle": "Mixed muted check",
"Description": "Fixture for muted status aggregation.",
},
first_seen_at="2024-01-11T00:00:00Z",
muted=False,
)
pass_finding.add_resources([resource1])
fail_muted_finding = Finding.objects.create(
tenant_id=tenant.id,
uid="fg_mixed_muted_fail",
scan=scan1,
delta=None,
status=Status.FAIL,
severity=Severity.high,
impact=Severity.high,
check_id="mixed_muted_check",
check_metadata={
"CheckId": "mixed_muted_check",
"checktitle": "Mixed muted check",
"Description": "Fixture for muted status aggregation.",
},
first_seen_at="2024-01-12T00:00:00Z",
muted=True,
)
fail_muted_finding.add_resources([resource1])
# filter[region] forces finding-level aggregation so we exercise the
# raw-findings path without touching the daily summary fixture.
response = authenticated_client.get(
reverse("finding-group-list"),
{
"filter[inserted_at]": TODAY,
"filter[check_id]": "mixed_muted_check",
"filter[region]": "us-east-1",
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert len(data) == 1
attrs = data[0]["attributes"]
assert attrs["status"] == "PASS"
assert attrs["muted"] is False
assert attrs["pass_count"] == 1
assert attrs["fail_count"] == 0
assert attrs["fail_muted_count"] == 1
assert attrs["muted_count"] == 1
def test_finding_groups_status_filter(
self, authenticated_client, finding_groups_fixture
):
@@ -17348,111 +17271,3 @@ class TestFindingGroupViewSet:
attrs = item["attributes"]
assert "finding_id" in attrs
assert attrs["finding_id"] in rds_finding_ids
def test_latest_resources_picks_scan_by_completed_at_when_overlap(
self,
authenticated_client,
tenants_fixture,
providers_fixture,
resources_fixture,
):
"""Overlapping scans on the same provider must resolve to the scan
with the latest completed_at, matching the /latest summary path and
the daily-summary upsert (keyed on midnight(completed_at)). Picking
by inserted_at here produced /resources and /latest reading from
different scans and reporting diverging delta/new counts.
"""
tenant = tenants_fixture[0]
provider = providers_fixture[0]
resource = resources_fixture[0]
check_id = "overlap_regression_check"
t0 = datetime.now(timezone.utc) - timedelta(hours=5)
t1 = t0 + timedelta(hours=1)
t1_end = t1 + timedelta(minutes=30)
t2 = t0 + timedelta(hours=4)
scan_long = Scan.objects.create(
name="long overlap scan",
provider=provider,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant_id=tenant.id,
started_at=t0,
completed_at=t2,
)
scan_short = Scan.objects.create(
name="short overlap scan",
provider=provider,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant_id=tenant.id,
started_at=t1,
completed_at=t1_end,
)
# inserted_at is auto_now_add so override with .update() to recreate
# the overlap shape: short scan inserted later but completed earlier.
Scan.all_objects.filter(pk=scan_long.pk).update(inserted_at=t0)
Scan.all_objects.filter(pk=scan_short.pk).update(inserted_at=t1)
scan_long.refresh_from_db()
scan_short.refresh_from_db()
assert scan_short.inserted_at > scan_long.inserted_at
assert scan_long.completed_at > scan_short.completed_at
long_finding = Finding.objects.create(
tenant_id=tenant.id,
uid=f"{check_id}_long",
scan=scan_long,
delta=None,
status=Status.FAIL,
status_extended="long scan finding",
impact=Severity.high,
impact_extended="high",
severity=Severity.high,
raw_result={"status": Status.FAIL, "severity": Severity.high},
check_id=check_id,
check_metadata={
"CheckId": check_id,
"checktitle": "Overlap regression",
"Description": "Overlapping scan regression.",
},
first_seen_at=t0,
muted=False,
)
long_finding.add_resources([resource])
short_finding = Finding.objects.create(
tenant_id=tenant.id,
uid=f"{check_id}_short",
scan=scan_short,
delta="new",
status=Status.FAIL,
status_extended="short scan finding",
impact=Severity.high,
impact_extended="high",
severity=Severity.high,
raw_result={"status": Status.FAIL, "severity": Severity.high},
check_id=check_id,
check_metadata={
"CheckId": check_id,
"checktitle": "Overlap regression",
"Description": "Overlapping scan regression.",
},
first_seen_at=t1,
muted=False,
)
short_finding.add_resources([resource])
response = authenticated_client.get(
reverse(
"finding-group-latest_resources",
kwargs={"check_id": check_id},
),
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert len(data) == 1
attrs = data[0]["attributes"]
assert attrs["finding_id"] == str(long_finding.id)
assert attrs["delta"] is None
+35 -60
@@ -7281,18 +7281,14 @@ class FindingGroupViewSet(BaseRLSViewSet):
# finding-level aggregation path.
row.pop("nonmuted_count", None)
# Muted findings are treated as resolved/accepted, so they do not
# contribute to a failing status. A group is FAIL only when there
# is at least one non-muted FAIL; otherwise any pass (muted or
# not) or any muted fail makes the group PASS. Only groups whose
# findings are exclusively MANUAL fall through to MANUAL.
if row.get("fail_count", 0) > 0:
# Compute aggregated status from non-muted counts first, then
# fall back to muted counts so fully-muted groups still reflect
# the underlying check outcome.
total_fail = row.get("fail_count", 0) + row.get("fail_muted_count", 0)
total_pass = row.get("pass_count", 0) + row.get("pass_muted_count", 0)
if total_fail > 0:
row["status"] = "FAIL"
elif (
row.get("pass_count", 0) > 0
or row.get("pass_muted_count", 0) > 0
or row.get("fail_muted_count", 0) > 0
):
elif total_pass > 0:
row["status"] = "PASS"
else:
row["status"] = "MANUAL"
@@ -7392,11 +7388,12 @@ class FindingGroupViewSet(BaseRLSViewSet):
if computed_params.get("status") or computed_params.getlist("status__in"):
queryset = queryset.annotate(
total_fail=F("fail_count") + F("fail_muted_count"),
total_pass=F("pass_count") + F("pass_muted_count"),
).annotate(
aggregated_status=Case(
When(fail_count__gt=0, then=Value("FAIL")),
When(pass_count__gt=0, then=Value("PASS")),
When(pass_muted_count__gt=0, then=Value("PASS")),
When(fail_muted_count__gt=0, then=Value("PASS")),
When(total_fail__gt=0, then=Value("FAIL")),
When(total_pass__gt=0, then=Value("PASS")),
default=Value("MANUAL"),
output_field=CharField(),
)
@@ -7416,25 +7413,6 @@ class FindingGroupViewSet(BaseRLSViewSet):
return filterset.qs
def _resolve_finding_ids(self, filtered_queryset):
"""
Materialize and request-cache the finding_ids list used to anchor
RFM lookups.
Turning `finding_id__in=Subquery(findings_qs)` into `finding_id__in=
[uuid, ...]` nudges PostgreSQL out of a Merge Semi Join that ends up
reading hundreds of thousands of RFM index entries just to post-
filter tenant_id. Caching on the ViewSet instance (one instance per
request) avoids duplicating the findings round-trip when several
helpers build different RFM querysets from the same filtered set.
"""
cached = getattr(self, "_finding_ids_cache", None)
if cached is not None and cached[0] is filtered_queryset:
return cached[1]
finding_ids = list(filtered_queryset.order_by().values_list("id", flat=True))
self._finding_ids_cache = (filtered_queryset, finding_ids)
return finding_ids
def _build_resource_mapping_queryset(
self, filtered_queryset, resource_ids=None, tenant_id: str | None = None
):
@@ -7444,10 +7422,10 @@ class FindingGroupViewSet(BaseRLSViewSet):
Starting from ResourceFindingMapping avoids scanning all mappings
before applying check_id/date filters on findings.
"""
finding_ids = self._resolve_finding_ids(filtered_queryset)
finding_ids = filtered_queryset.order_by().values("id")
mapping_queryset = ResourceFindingMapping.objects.filter(
finding_id__in=finding_ids
finding_id__in=Subquery(finding_ids)
)
if tenant_id:
mapping_queryset = mapping_queryset.filter(tenant_id=tenant_id)
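The _resolve_finding_ids docstring and the 1.25.2 changelog entry describe the trade-off this hunk touches: anchoring the ResourceFindingMapping lookup on a materialized list of finding IDs versus a Subquery. A minimal sketch of both shapes, with the mapping model passed in as a parameter because only the queryset shapes are being illustrated:

from django.db.models import QuerySet, Subquery

def mappings_from_id_list(findings: QuerySet, mapping_model) -> QuerySet:
    # Materialized shape: pull the UUIDs into Python first so PostgreSQL sees a
    # literal IN (...) list and can nested-loop over the finding_id index.
    finding_ids = list(findings.order_by().values_list("id", flat=True))
    return mapping_model.objects.filter(finding_id__in=finding_ids)

def mappings_from_subquery(findings: QuerySet, mapping_model) -> QuerySet:
    # Subquery shape: one round-trip fewer, but the docstring above notes the
    # planner may pick a merge semi join that reads far more index entries.
    return mapping_model.objects.filter(
        finding_id__in=Subquery(findings.order_by().values("id"))
    )

Which shape plans better is data-dependent; the docstring above records the observation that motivated the list form.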
@@ -7801,11 +7779,12 @@ class FindingGroupViewSet(BaseRLSViewSet):
if ordering:
if any(field.lstrip("-") == "status_order" for field in ordering):
aggregated_queryset = aggregated_queryset.annotate(
total_fail_for_sort=F("fail_count") + F("fail_muted_count"),
total_pass_for_sort=F("pass_count") + F("pass_muted_count"),
).annotate(
status_order=Case(
When(fail_count__gt=0, then=Value(3)),
When(pass_count__gt=0, then=Value(2)),
When(pass_muted_count__gt=0, then=Value(2)),
When(fail_muted_count__gt=0, then=Value(2)),
When(total_fail_for_sort__gt=0, then=Value(3)),
When(total_pass_for_sort__gt=0, then=Value(2)),
default=Value(1),
output_field=IntegerField(),
)
@@ -7866,24 +7845,23 @@ class FindingGroupViewSet(BaseRLSViewSet):
request, filtered_queryset, resource_ids, tenant_id, ordering
)
# Serve the mapping response directly and piggyback on the paginator
# count to detect orphan-only groups, instead of paying a separate
# has_mappings.exists() semi-join over ResourceFindingMapping on
# every non-IaC request. TODO: once the ephemeral resources strategy
# is decided, mixed groups should route to _combined_paginated_response.
response = self._mapping_paginated_response(
request, filtered_queryset, resource_ids, tenant_id, ordering
)
has_mappings = self._build_resource_mapping_queryset(
filtered_queryset, resource_ids=None, tenant_id=tenant_id
).exists()
page = getattr(self.paginator, "page", None)
mapping_total = page.paginator.count if page is not None else None
if mapping_total == 0:
# Pure orphan group (e.g. IaC): synthesize resource-like rows.
return self._combined_paginated_response(
request, filtered_queryset, tenant_id, ordering
if has_mappings:
# Normal or mixed group: serve only resource-mapped rows.
# TODO: Orphan findings in mixed groups are intentionally excluded
# until the ephemeral resources strategy is decided. When resolved,
# route mixed groups to _combined_paginated_response instead.
return self._mapping_paginated_response(
request, filtered_queryset, resource_ids, tenant_id, ordering
)
return response
# Pure orphan group (e.g. IaC): synthesize resource-like rows.
return self._combined_paginated_response(
request, filtered_queryset, tenant_id, ordering
)
def _mapping_paginated_response(
self, request, filtered_queryset, resource_ids, tenant_id, ordering
@@ -8167,13 +8145,10 @@ class FindingGroupViewSet(BaseRLSViewSet):
tenant_id = request.tenant_id
queryset = self._get_finding_queryset()
# Order by -completed_at (matching the /latest summary path and the
# daily summary upsert keyed on midnight(completed_at)) so that
# overlapping scans do not make /resources and /latest read from
# different scans and report diverging counts.
# Get latest completed scan for each provider
latest_scan_ids = (
Scan.objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
.order_by("provider_id", "-completed_at", "-inserted_at")
.order_by("provider_id", "-inserted_at")
.distinct("provider_id")
.values_list("id", flat=True)
)
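The latest_scan_ids query above is the PostgreSQL DISTINCT ON pattern: Django's .distinct("provider_id") requires provider_id to be the first order_by key, and the remaining keys decide which scan survives per provider (the two sides of this hunk differ only in those tie-breakers, -completed_at/-inserted_at versus -inserted_at). A short sketch of the pattern, reusing the Scan and StateChoices names from the diff:

def latest_scan_ids_per_provider(tenant_id):
    # DISTINCT ON: the leading order_by field must match the distinct field;
    # the remaining sort keys pick which completed scan wins for each provider.
    return (
        Scan.objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
        .order_by("provider_id", "-completed_at", "-inserted_at")
        .distinct("provider_id")
        .values_list("id", flat=True)
    )

The resulting IDs can then anchor a findings filter such as Finding.objects.filter(scan_id__in=latest_scan_ids_per_provider(tenant_id)).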
+1 -3
@@ -17,10 +17,8 @@ celery_app.config_from_object("django.conf:settings", namespace="CELERY")
celery_app.conf.update(result_extended=True, result_expires=None)
celery_app.conf.broker_transport_options = {
"visibility_timeout": BROKER_VISIBILITY_TIMEOUT,
"queue_order_strategy": "priority",
"visibility_timeout": BROKER_VISIBILITY_TIMEOUT
}
celery_app.conf.task_default_priority = 6
celery_app.conf.result_backend_transport_options = {
"visibility_timeout": BROKER_VISIBILITY_TIMEOUT
}
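For context on this hunk and the deleted migration earlier in the compare: with a Redis broker, Celery only honors per-message priorities when the transport is told to order queues by priority, and the migration pinned the attack-paths cleanup beat entry to priority 0. A minimal sketch of how the pieces fit together; the broker URL and task body are placeholders, and the claim that lower numbers run first is specific to the Redis transport:

from celery import Celery

app = Celery("example", broker="redis://localhost:6379/0")  # placeholder URL

# One side of the hunk above: let the Redis transport pop higher-priority
# messages first and give every task a default priority of 6.
app.conf.broker_transport_options = {"queue_order_strategy": "priority"}
app.conf.task_default_priority = 6

@app.task
def cleanup_stale_scans():  # stand-in for the attack-paths cleanup task
    ...

# The deleted migration sets priority=0 on the PeriodicTask for this job; on
# the Redis transport 0 sorts ahead of the default 6, so cleanup runs sooner.
cleanup_stale_scans.apply_async(priority=0)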
+10 -46
@@ -1,8 +1,6 @@
# Portions of this file are based on code from the Cartography project
# (https://github.com/cartography-cncf/cartography), which is licensed under the Apache 2.0 License.
import time
from typing import Any
import aioboto3
@@ -35,7 +33,7 @@ def start_aws_ingestion(
For the scan progress updates:
- The caller of this function (`tasks.jobs.attack_paths.scan.run`) has set it to 2.
- When the control returns to the caller, it will be set to 93.
- When the control returns to the caller, it will be set to 95.
"""
# Initialize variables common to all jobs
@@ -91,50 +89,34 @@ def start_aws_ingestion(
logger.info(
f"Syncing function permission_relationships for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.RESOURCE_FUNCTIONS["permission_relationships"](**sync_args)
logger.info(
f"Synced function permission_relationships for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 88)
if "resourcegroupstaggingapi" in requested_syncs:
logger.info(
f"Syncing function resourcegroupstaggingapi for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.RESOURCE_FUNCTIONS["resourcegroupstaggingapi"](**sync_args)
logger.info(
f"Synced function resourcegroupstaggingapi for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 89)
logger.info(
f"Syncing ec2_iaminstanceprofile scoped analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.run_scoped_analysis_job(
"aws_ec2_iaminstanceprofile.json",
neo4j_session,
common_job_parameters,
)
logger.info(
f"Synced ec2_iaminstanceprofile scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 90)
logger.info(
f"Syncing lambda_ecr analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.run_analysis_job(
"aws_lambda_ecr.json",
neo4j_session,
common_job_parameters,
)
logger.info(
f"Synced lambda_ecr analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
if all(
s in requested_syncs
@@ -143,34 +125,25 @@ def start_aws_ingestion(
logger.info(
f"Syncing lb_container_exposure scoped analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.run_scoped_analysis_job(
"aws_lb_container_exposure.json",
neo4j_session,
common_job_parameters,
)
logger.info(
f"Synced lb_container_exposure scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
if all(s in requested_syncs for s in ["ec2:network_acls", "ec2:load_balancer_v2"]):
logger.info(
f"Syncing lb_nacl_direct scoped analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.run_scoped_analysis_job(
"aws_lb_nacl_direct.json",
neo4j_session,
common_job_parameters,
)
logger.info(
f"Synced lb_nacl_direct scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 91)
logger.info(f"Syncing metadata for AWS account {prowler_api_provider.uid}")
t0 = time.perf_counter()
cartography_aws.merge_module_sync_metadata(
neo4j_session,
group_type="AWSAccount",
@@ -179,23 +152,24 @@ def start_aws_ingestion(
update_tag=cartography_config.update_tag,
stat_handler=cartography_aws.stat_handler,
)
logger.info(
f"Synced metadata for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 92)
# Removing the added extra field
del common_job_parameters["AWS_ID"]
logger.info(f"Syncing cleanup_job for AWS account {prowler_api_provider.uid}")
cartography_aws.run_cleanup_job(
"aws_post_ingestion_principals_cleanup.json",
neo4j_session,
common_job_parameters,
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 93)
logger.info(f"Syncing analysis for AWS account {prowler_api_provider.uid}")
t0 = time.perf_counter()
cartography_aws._perform_aws_analysis(
requested_syncs, neo4j_session, common_job_parameters
)
logger.info(
f"Synced analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 93)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 94)
return failed_syncs
@@ -260,8 +234,6 @@ def sync_aws_account(
)
try:
func_t0 = time.perf_counter()
# `ecr:image_layers` uses `aioboto3_session` instead of `boto3_session`
if func_name == "ecr:image_layers":
cartography_aws.RESOURCE_FUNCTIONS[func_name](
@@ -285,15 +257,7 @@ def sync_aws_account(
else:
cartography_aws.RESOURCE_FUNCTIONS[func_name](**sync_args)
logger.info(
f"Synced function {func_name} for AWS account {prowler_api_provider.uid} in {time.perf_counter() - func_t0:.3f}s"
)
except Exception as e:
logger.info(
f"Synced function {func_name} for AWS account {prowler_api_provider.uid} in {time.perf_counter() - func_t0:.3f}s (FAILED)"
)
exception_message = utils.stringify_exception(
e, f"Exception for AWS sync function: {func_name}"
)
@@ -8,9 +8,9 @@ from tasks.jobs.attack_paths import aws
# Batch size for Neo4j write operations (resource labeling, cleanup)
BATCH_SIZE = env.int("ATTACK_PATHS_BATCH_SIZE", 1000)
# Batch size for Postgres findings fetch (keyset pagination page size)
FINDINGS_BATCH_SIZE = env.int("ATTACK_PATHS_FINDINGS_BATCH_SIZE", 1000)
FINDINGS_BATCH_SIZE = env.int("ATTACK_PATHS_FINDINGS_BATCH_SIZE", 500)
# Batch size for temp-to-tenant graph sync (nodes and relationships per cursor page)
SYNC_BATCH_SIZE = env.int("ATTACK_PATHS_SYNC_BATCH_SIZE", 1000)
SYNC_BATCH_SIZE = env.int("ATTACK_PATHS_SYNC_BATCH_SIZE", 250)
# Neo4j internal labels (Prowler-specific, not provider-specific)
# - `Internet`: Singleton node representing external internet access for exposed-resource queries
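For context on the keyset-pagination note attached to `FINDINGS_BATCH_SIZE` above: each page is fetched with a filter on the last key already seen rather than an `OFFSET`, so every page stays a bounded index range scan. A minimal sketch of the pattern; `run_query` and the table/column names are placeholders, not the real Prowler query:

```python
from typing import Any, Callable, Iterator

def stream_rows(
    run_query: Callable[[str, tuple], list[dict[str, Any]]],
    page_size: int = 500,
) -> Iterator[list[dict[str, Any]]]:
    """Yield pages of rows using keyset pagination on an integer primary key."""
    last_id = 0
    while True:
        # Filter on the last seen id instead of using OFFSET, so each page is
        # served from the primary-key index regardless of how deep we are.
        page = run_query(
            "SELECT id, status FROM findings WHERE id > %s ORDER BY id LIMIT %s",
            (last_id, page_size),
        )
        if not page:
            break
        yield page
        last_id = page[-1]["id"]
```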
@@ -12,7 +12,6 @@ from typing import Any, Generator
from uuid import UUID
import neo4j
from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger
from tasks.jobs.attack_paths.config import (
@@ -87,21 +86,17 @@ def analysis(
prowler_api_provider: Provider,
scan_id: str,
config: CartographyConfig,
) -> tuple[int, int]:
) -> None:
"""
Main entry point for Prowler findings analysis.
Adds resource labels and loads findings.
Returns (labeled_nodes, findings_loaded).
"""
total_labeled = add_resource_label(
add_resource_label(
neo4j_session, prowler_api_provider.provider, str(prowler_api_provider.uid)
)
findings_data = stream_findings_with_resources(prowler_api_provider, scan_id)
total_loaded = load_findings(
neo4j_session, findings_data, prowler_api_provider, config
)
return total_labeled, total_loaded
load_findings(neo4j_session, findings_data, prowler_api_provider, config)
def add_resource_label(
@@ -151,11 +146,12 @@ def load_findings(
findings_batches: Generator[list[dict[str, Any]], None, None],
prowler_api_provider: Provider,
config: CartographyConfig,
) -> int:
) -> None:
"""Load Prowler findings into the graph, linking them to resources."""
query = render_cypher_template(
INSERT_FINDING_TEMPLATE,
{
"__ROOT_NODE_LABEL__": get_root_node_label(prowler_api_provider.provider),
"__NODE_UID_FIELD__": get_node_uid_field(prowler_api_provider.provider),
"__RESOURCE_LABEL__": get_provider_resource_label(
prowler_api_provider.provider
@@ -164,6 +160,7 @@ def load_findings(
)
parameters = {
"provider_uid": str(prowler_api_provider.uid),
"last_updated": config.update_tag,
"prowler_version": ProwlerConfig.prowler_version,
}
@@ -181,7 +178,6 @@ def load_findings(
neo4j_session.run(query, parameters)
logger.info(f"Finished loading {total_records} records in {batch_num} batches")
return total_records
# Findings Streaming (Generator-based)
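As the comment indicates, findings arrive as a generator of batches and each batch is written with one parameterized `UNWIND` statement. A rough sketch of that consumer shape, assuming a Neo4j-style `session.run(query, parameters)` object and a simplified query in place of the real template:

```python
from typing import Any, Generator

def write_batches(
    session,
    batches: Generator[list[dict[str, Any]], None, None],
    base_parameters: dict[str, Any],
) -> int:
    """Consume finding batches lazily and UNWIND each one in a single query."""
    query = "UNWIND $findings_data AS f MERGE (n:ProwlerFinding {id: f.id})"
    total = 0
    for batch in batches:
        # Only the current batch is materialized in memory at any point.
        session.run(query, {**base_parameters, "findings_data": batch})
        total += len(batch)
    return total
```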
@@ -32,14 +32,17 @@ ADD_RESOURCE_LABEL_TEMPLATE = """
"""
INSERT_FINDING_TEMPLATE = f"""
MATCH (account:__ROOT_NODE_LABEL__ {{id: $provider_uid}})
UNWIND $findings_data AS finding_data
OPTIONAL MATCH (resource_by_uid:__RESOURCE_LABEL__ {{__NODE_UID_FIELD__: finding_data.resource_uid}})
WITH finding_data, resource_by_uid
OPTIONAL MATCH (account)-->(resource_by_uid:__RESOURCE_LABEL__)
WHERE resource_by_uid.__NODE_UID_FIELD__ = finding_data.resource_uid
WITH account, finding_data, resource_by_uid
OPTIONAL MATCH (resource_by_id:__RESOURCE_LABEL__ {{id: finding_data.resource_uid}})
OPTIONAL MATCH (account)-->(resource_by_id:__RESOURCE_LABEL__)
WHERE resource_by_uid IS NULL
WITH finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
AND resource_by_id.id = finding_data.resource_uid
WITH account, finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
WHERE resource IS NOT NULL
MERGE (finding:{PROWLER_FINDING_LABEL} {{id: finding_data.id}})
+10 -38
View File
@@ -55,7 +55,6 @@ exception propagates to Celery.
import logging
import time
from typing import Any
from cartography.config import Config as CartographyConfig
@@ -145,12 +144,6 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
attack_paths_scan, task_id, tenant_cartography_config
)
scan_t0 = time.perf_counter()
logger.info(
f"Starting Attack Paths scan ({attack_paths_scan.id}) for "
f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
)
subgraph_dropped = False
sync_completed = False
provider_gated = False
@@ -176,7 +169,6 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 2)
# The real scan, which iterates over the cloud services
t0 = time.perf_counter()
ingestion_exceptions = utils.call_within_event_loop(
cartography_ingestion_function,
tmp_neo4j_session,
@@ -185,23 +177,19 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
prowler_sdk_provider,
attack_paths_scan,
)
logger.info(
f"Cartography ingestion completed in {time.perf_counter() - t0:.3f}s "
f"(failed_syncs={len(ingestion_exceptions)})"
)
# Post-processing: kept to stay closer to Cartography's standard flow
logger.info(
f"Syncing Cartography ontology for AWS account {prowler_api_provider.uid}"
)
cartography_ontology.run(tmp_neo4j_session, tmp_cartography_config)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 94)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 95)
logger.info(
f"Syncing Cartography analysis for AWS account {prowler_api_provider.uid}"
)
cartography_analysis.run(tmp_neo4j_session, tmp_cartography_config)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 95)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)
# Creating Internet node and CAN_ACCESS relationships
logger.info(
@@ -210,20 +198,14 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
internet.analysis(
tmp_neo4j_session, prowler_api_provider, tmp_cartography_config
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)
# Adding Prowler Finding nodes and relationships
logger.info(
f"Syncing Prowler analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
labeled_nodes, findings_loaded = findings.analysis(
findings.analysis(
tmp_neo4j_session, prowler_api_provider, scan_id, tmp_cartography_config
)
logger.info(
f"Prowler analysis completed in {time.perf_counter() - t0:.3f}s "
f"(findings={findings_loaded}, labeled_nodes={labeled_nodes})"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 97)
logger.info(
@@ -245,33 +227,22 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
logger.info(f"Deleting existing provider graph in {tenant_database_name}")
db_utils.set_provider_graph_data_ready(attack_paths_scan, False)
provider_gated = True
t0 = time.perf_counter()
deleted_nodes = graph_database.drop_subgraph(
graph_database.drop_subgraph(
database=tenant_database_name,
provider_id=str(prowler_api_provider.id),
)
logger.info(
f"Deleted existing provider graph in {time.perf_counter() - t0:.3f}s "
f"(deleted_nodes={deleted_nodes})"
)
subgraph_dropped = True
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 98)
logger.info(
f"Syncing graph from {tmp_database_name} into {tenant_database_name}"
)
t0 = time.perf_counter()
sync_result = sync.sync_graph(
sync.sync_graph(
source_database=tmp_database_name,
target_database=tenant_database_name,
tenant_id=str(prowler_api_provider.tenant_id),
provider_id=str(prowler_api_provider.id),
)
logger.info(
f"Synced graph in {time.perf_counter() - t0:.3f}s "
f"(nodes={sync_result['nodes']}, relationships={sync_result['relationships']})"
)
sync_completed = True
db_utils.set_graph_data_ready(attack_paths_scan, True)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 99)
@@ -279,16 +250,17 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
logger.info(f"Clearing Neo4j cache for database {tenant_database_name}")
graph_database.clear_cache(tenant_database_name)
logger.info(
f"Completed Cartography ({attack_paths_scan.id}) for "
f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
)
logger.info(f"Dropping temporary Neo4j database {tmp_database_name}")
graph_database.drop_database(tmp_database_name)
db_utils.finish_attack_paths_scan(
attack_paths_scan, StateChoices.COMPLETED, ingestion_exceptions
)
logger.info(
f"Attack Paths scan completed in {time.perf_counter() - scan_t0:.3f}s "
f"(state=completed, failed_syncs={len(ingestion_exceptions)})"
)
return ingestion_exceptions
except Exception as e:
@@ -5,8 +5,6 @@ This module handles syncing graph data from temporary scan databases
to the tenant database, adding provider isolation labels and properties.
"""
import time
from collections import defaultdict
from typing import Any
@@ -83,7 +81,6 @@ def sync_nodes(
Source and target sessions are opened sequentially per batch to avoid
holding two Bolt connections simultaneously for the entire sync duration.
"""
t0 = time.perf_counter()
last_id = -1
total_synced = 0
@@ -120,7 +117,7 @@ def sync_nodes(
total_synced += batch_count
logger.info(
f"Synced {total_synced} nodes from {source_database} to {target_database} in {time.perf_counter() - t0:.3f}s"
f"Synced {total_synced} nodes from {source_database} to {target_database}"
)
return total_synced
@@ -139,7 +136,6 @@ def sync_relationships(
Source and target sessions are opened sequentially per batch to avoid
holding two Bolt connections simultaneously for the entire sync duration.
"""
t0 = time.perf_counter()
last_id = -1
total_synced = 0
@@ -170,7 +166,7 @@ def sync_relationships(
total_synced += batch_count
logger.info(
f"Synced {total_synced} relationships from {source_database} to {target_database} in {time.perf_counter() - t0:.3f}s"
f"Synced {total_synced} relationships from {source_database} to {target_database}"
)
return total_synced
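Both docstrings above describe the same pattern: the source and target sessions are opened one after the other inside each batch, so two Bolt connections are never held open for the full sync. A condensed sketch of that shape with the official `neo4j` driver; the Cypher is intentionally simplified (it copies properties only) and does not reflect the real sync queries:

```python
from neo4j import GraphDatabase

def copy_nodes_in_batches(
    uri: str, auth: tuple, source_db: str, target_db: str, batch_size: int = 250
) -> int:
    total, last_id = 0, -1
    with GraphDatabase.driver(uri, auth=auth) as driver:
        while True:
            # Open the source session, read one page, and close it before
            # touching the target database.
            with driver.session(database=source_db) as src:
                rows = src.run(
                    "MATCH (n) WHERE id(n) > $last_id "
                    "RETURN id(n) AS id, properties(n) AS props "
                    "ORDER BY id(n) LIMIT $limit",
                    last_id=last_id,
                    limit=batch_size,
                ).data()
            if not rows:
                return total
            # Only now open the target session to write this batch.
            with driver.session(database=target_db) as dst:
                dst.run(
                    "UNWIND $rows AS row CREATE (m) SET m = row.props",
                    rows=rows,
                )
            last_id = rows[-1]["id"]
            total += len(rows)
```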
+1 -9
View File
@@ -752,19 +752,11 @@ def _process_finding_micro_batch(
)
if mappings_to_create:
created_mappings = ResourceFindingMapping.objects.bulk_create(
ResourceFindingMapping.objects.bulk_create(
mappings_to_create,
batch_size=SCAN_DB_BATCH_SIZE,
ignore_conflicts=True,
unique_fields=["tenant_id", "resource_id", "finding_id"],
)
inserted = sum(1 for m in created_mappings if m.pk)
if inserted != len(mappings_to_create):
logger.error(
f"scan {scan_instance.id}: expected "
f"{len(mappings_to_create)} ResourceFindingMapping rows, "
f"inserted {inserted}. Rolling back micro-batch."
)
# Update finding denormalized arrays
findings_to_update = []
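The verification that was dropped here counted returned objects with a populated `pk`; with `ignore_conflicts=True`, Django skips conflicting rows silently and, on PostgreSQL, generally leaves `pk` unset even on inserted instances, so that count was not a reliable insert count. A small sketch of the caveat, using a hypothetical `Mapping` model inside a configured Django project:

```python
def insert_mappings(rows: list) -> list:
    """Bulk-insert mapping rows, tolerating duplicates (illustrative only)."""
    created = Mapping.objects.bulk_create(  # `Mapping` is a hypothetical model
        rows,
        batch_size=500,
        ignore_conflicts=True,
    )
    # With ignore_conflicts=True, conflicting rows are skipped without error
    # and pk is generally not populated on the returned instances, so counting
    # objects with a truthy pk does not measure how many rows were inserted.
    return created
```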
@@ -38,14 +38,11 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.finish_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch(
"tasks.jobs.attack_paths.scan.sync.sync_graph",
return_value={"nodes": 0, "relationships": 0},
)
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph", return_value=0)
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -191,7 +188,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -290,7 +287,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -393,7 +390,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -492,17 +489,14 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch(
"tasks.jobs.attack_paths.scan.sync.sync_graph",
return_value={"nodes": 0, "relationships": 0},
)
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch(
"tasks.jobs.attack_paths.scan.graph_database.drop_subgraph",
side_effect=RuntimeError("drop failed"),
)
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -615,7 +609,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -724,14 +718,11 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch(
"tasks.jobs.attack_paths.scan.sync.sync_graph",
return_value={"nodes": 0, "relationships": 0},
)
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -842,17 +833,14 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch(
"tasks.jobs.attack_paths.scan.sync.sync_graph",
return_value={"nodes": 0, "relationships": 0},
)
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch(
"tasks.jobs.attack_paths.scan.graph_database.drop_subgraph",
side_effect=RuntimeError("drop failed"),
)
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -1286,6 +1274,10 @@ class TestAttackPathsFindingsHelpers:
mock_session = MagicMock()
with (
patch(
"tasks.jobs.attack_paths.findings.get_root_node_label",
return_value="AWSAccount",
),
patch(
"tasks.jobs.attack_paths.findings.get_node_uid_field",
return_value="arn",
@@ -1302,6 +1294,7 @@ class TestAttackPathsFindingsHelpers:
assert mock_session.run.call_count == 2
for call_args in mock_session.run.call_args_list:
params = call_args.args[1]
assert params["provider_uid"] == str(provider.uid)
assert params["last_updated"] == config.update_tag
assert "findings_data" in params
@@ -1680,6 +1673,10 @@ class TestAttackPathsFindingsHelpers:
yield # Make it a generator
with (
patch(
"tasks.jobs.attack_paths.findings.get_root_node_label",
return_value="AWSAccount",
),
patch(
"tasks.jobs.attack_paths.findings.get_node_uid_field",
return_value="arn",
-64
View File
@@ -1,64 +0,0 @@
# Prowler Reverse Proxy Configuration
Ready-to-use nginx configuration for running Prowler behind a reverse proxy.
## Problem
Prowler's default Docker setup exposes two separate services:
- **UI** on port 3000
- **API** on port 8080
This causes CORS issues and authentication failures (especially SAML SSO) when accessed through an external reverse proxy, since the proxy typically exposes a single domain.
## Solution
This adds an nginx container that unifies both services behind a single port, correctly forwarding headers so that Django generates proper URLs for SAML ACS callbacks and API responses.
## Quick Start
From the prowler root directory:
docker compose -f docker-compose.yml \
-f contrib/reverse-proxy/docker-compose.reverse-proxy.yml \
up -d
Access Prowler at http://localhost (port 80).
## With an External Reverse Proxy
Point your external reverse proxy to the prowler-nginx container on port 80.
### Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| PROWLER_PROXY_PORT | 80 | Port exposed by the nginx proxy |
### Example: Traefik
services:
nginx:
labels:
- "traefik.enable=true"
- "traefik.http.routers.prowler.rule=Host(`prowler.example.com`)"
- "traefik.http.routers.prowler.tls.certresolver=letsencrypt"
- "traefik.http.services.prowler.loadbalancer.server.port=80"
### Example: Caddy
prowler.example.com {
reverse_proxy prowler-nginx:80
}
## SAML SSO
If using SAML SSO behind a reverse proxy, also set the SAML_ACS_BASE_URL environment variable:
SAML_ACS_BASE_URL=https://prowler.example.com
## Architecture
Internet -> External Reverse Proxy -> prowler-nginx:80
|-- /api/* -> prowler-api:8080
|-- /accounts/saml/ -> prowler-api:8080
+-- /* -> prowler-ui:3000
@@ -1,42 +0,0 @@
# Prowler Reverse Proxy - Docker Compose Override
#
# Use this alongside the main docker-compose.yml to add an nginx
# reverse proxy that unifies UI and API behind a single port.
#
# Usage:
# docker compose -f docker-compose.yml -f contrib/reverse-proxy/docker-compose.reverse-proxy.yml up -d
#
# Then access Prowler at http://localhost (port 80) or configure
# your external reverse proxy (Traefik, Caddy, Cloudflare Tunnel,
# Pangolin, etc.) to point to this container on port 80.
#
# For HTTPS with your own certs, see the README in this directory.
#
# Fixes: https://github.com/prowler-cloud/prowler/issues/8516
services:
nginx:
image: nginx:alpine
container_name: prowler-nginx
restart: unless-stopped
ports:
- "${PROWLER_PROXY_PORT:-80}:80"
volumes:
- ./contrib/reverse-proxy/nginx.conf:/etc/nginx/conf.d/default.conf:ro
depends_on:
- prowler-ui
- prowler-api
networks:
- prowler-network
# Override UI to not expose port externally (nginx handles it)
prowler-ui:
ports: !reset []
# Override API to not expose port externally (nginx handles it)
prowler-api:
ports: !reset []
networks:
prowler-network:
driver: bridge
-70
View File
@@ -1,70 +0,0 @@
# Prowler Reverse Proxy Configuration
# Routes both UI and API through a single endpoint
#
# Usage: See docker-compose.reverse-proxy.yml
# Fixes: https://github.com/prowler-cloud/prowler/issues/8516
upstream prowler-ui {
server prowler-ui:3000;
}
upstream prowler-api {
server prowler-api:8080;
}
server {
listen 80;
server_name _;
# Security headers
add_header X-Content-Type-Options "nosniff" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
# API requests — proxy to prowler-api
location /api/ {
proxy_pass http://prowler-api/api/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_read_timeout 300s;
proxy_connect_timeout 10s;
# Handle large scan payloads
client_max_body_size 50m;
}
# SAML endpoints — proxy to prowler-api
location /accounts/saml/ {
proxy_pass http://prowler-api/accounts/saml/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
}
# Everything else — proxy to prowler-ui
location / {
proxy_pass http://prowler-ui/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
# WebSocket support for Next.js HMR (dev) and live updates
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
# Health check endpoint
location /health {
access_log off;
return 200 "ok\n";
add_header Content-Type text/plain;
}
}
@@ -7,11 +7,6 @@ Prowler requires AWS credentials to function properly. Authentication is availab
- Static Credentials
- Assumed Role
When using **Assumed Role**, the Prowler UI exposes two credential sources for calling `sts:AssumeRole`. The labels differ between Prowler Cloud and self-hosted Prowler App, but both map to the same underlying credential types:
- **AWS SDK Default** (shown as *"Prowler Cloud will assume your IAM role"* in Prowler Cloud and *"AWS SDK Default"* in self-hosted Prowler App): Prowler uses the credentials already available to the API and worker containers through the [AWS SDK default credential chain](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). This is the default in Prowler Cloud and requires extra configuration in self-hosted Prowler App (see [Configuring AWS SDK Default for Self-Hosted Prowler App](#configuring-aws-sdk-default-for-self-hosted-prowler-app)).
- **Access & Secret Key**: You paste an IAM user's `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and optionally `AWS_SESSION_TOKEN` into the form. Prowler uses those keys to call `sts:AssumeRole`.
## Required Permissions
To ensure full functionality, attach the following AWS managed policies to the designated user or role:
@@ -81,68 +76,6 @@ This method grants permanent access and is the recommended setup for production
---
## Configuring AWS SDK Default for Self-Hosted Prowler App
When self-hosting Prowler App with Docker Compose, the API and worker containers do not have AWS credentials by default. Selecting **AWS SDK Default** without configuring those credentials produces:
```
AWSAssumeRoleError[1012]: AWS assume role error - An error occurred (InvalidClientTokenId) when calling the AssumeRole operation: The security token included in the request is invalid.
```
To fix this, expose an IAM identity with `sts:AssumeRole` permission on the target role to both the `api` and `worker` services.
### Option 1: Environment Variables in `.env`
Add the following keys to the `.env` file used by `docker-compose.yml`:
```bash
AWS_ACCESS_KEY_ID="<your-access-key-id>"
AWS_SECRET_ACCESS_KEY="<your-secret-access-key>"
AWS_SESSION_TOKEN="<optional-session-token>"
AWS_DEFAULT_REGION="us-east-1"
```
The existing `docker-compose.yml` already loads `.env` into the `api`, `worker`, and `worker-beat` services, so `boto3` will pick them up through the default credential chain.
<Warning>
Treat the `.env` file as a secret. Do not commit it to version control, scope the IAM identity to the minimum permissions required (`sts:AssumeRole` on the target `ProwlerScan` role only), prefer short-lived credentials over long-lived access keys, and rotate the keys immediately if you suspect exposure.
</Warning>
Recreate the containers to apply the change. A plain `docker compose restart` will **not** reload values from a modified `.env` file — you must force-recreate:
```bash
docker compose up -d --force-recreate api worker worker-beat
```
### Option 2: IAM Role (Host with Instance Metadata)
If you run Prowler App on an EC2 instance, ECS task, or EKS pod with an attached IAM role that can assume the scan role, no extra configuration is needed — `boto3` resolves credentials through instance or task metadata automatically.
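For illustration, this is roughly what the API and worker containers do with whichever base credentials the default chain resolves (the `.env` variables from Option 1, or instance/task metadata from Option 2). The role ARN and external ID below are placeholders:

```python
import boto3

# boto3 resolves base credentials via the default chain: environment
# variables, shared config files, then EC2/ECS/EKS metadata.
sts = boto3.client("sts")
response = sts.assume_role(
    RoleArn="arn:aws:iam::123456789012:role/ProwlerScan",  # placeholder
    RoleSessionName="prowler-scan",
    ExternalId="my-external-id",  # must match the CloudFormation parameter
)
creds = response["Credentials"]
scan_session = boto3.Session(
    aws_access_key_id=creds["AccessKeyId"],
    aws_secret_access_key=creds["SecretAccessKey"],
    aws_session_token=creds["SessionToken"],
)
```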
### Trust Policy: Align `IAMPrincipal` With Your Identity
The [Prowler scan role CloudFormation template](https://github.com/prowler-cloud/prowler/blob/master/permissions/templates/cloudformation/prowler-scan-role.yml) restricts the trust policy with:
```
aws:PrincipalArn StringLike arn:aws:iam::<AccountId>:<IAMPrincipal>
```
`IAMPrincipal` defaults to `role/prowler*`, which only allows IAM roles whose name starts with `prowler`. If the identity hosting the API and worker containers is anything else, the `sts:AssumeRole` call fails with `AccessDenied` even when the credentials themselves are valid.
Redeploy (or update) the CloudFormation stack with an `IAMPrincipal` that matches your identity:
| Your identity on the API/worker containers | `IAMPrincipal` value |
| --- | --- |
| IAM user (for example `prowler-app`) | `user/prowler-app` |
| IAM role whose name doesn't start with `prowler` | `role/<your-role-name>` |
`AccountId` must also point to the account where that identity lives — the default is Prowler Cloud's account and only applies when assuming from Prowler Cloud.
<Note>
The same `External ID` entered in the Prowler UI must match the `ExternalId` parameter used when deploying the CloudFormation stack. A mismatch produces `AccessDenied` on `sts:AssumeRole`, not `InvalidClientTokenId`.
</Note>
---
## Credentials
<Tabs>
@@ -46,15 +46,15 @@ Before proceeding, choose the preferred authentication mode:
**Credentials**
* Quick scan using an IAM user's access keys
* No extra setup in AWS
* Static keys can be rotated or revoked at any time
* Quick scan as current user
* No extra setup
* Credentials time out
**Assumed Role**
* Recommended for production
* With AWS SDK Default as the credential source, no long-lived keys are stored in Prowler (Access & Secret Key still requires pasted keys)
* Requires permission to create an IAM role in the target account
* Preferred Setup
* Permanent Credentials
* Requires access to create role
---
@@ -67,23 +67,18 @@ This method grants permanent access and is the recommended setup for production
For detailed instructions on how to create the role, see [Authentication > Assume Role](/user-guide/providers/aws/authentication#assume-role-recommended).
7. Once the role is created, go to the **IAM Console**, click on the "ProwlerScan" role to open its details:
8. Once the role is created, go to the **IAM Console**, click on the "ProwlerScan" role to open its details:
![ProwlerScan role info](/images/providers/prowler-scan-pre-info.png)
8. Copy the **Role ARN**
9. Copy the **Role ARN**
![New Role Info](/images/providers/get-role-arn.png)
9. Paste the ARN into the corresponding field in Prowler Cloud or Prowler App
10. Paste the ARN into the corresponding field in Prowler Cloud or Prowler App
![Input the Role ARN](/images/providers/paste-role-arn-prowler.png)
10. Select the credential source Prowler should use to call `sts:AssumeRole`. The first option's label differs between deployments, but both labels map to the same `aws-sdk-default` credential type:
- **"Prowler Cloud will assume your IAM role"** (default in Prowler Cloud) / **"AWS SDK Default"** (in self-hosted Prowler App): Prowler uses the credentials available in the API and worker environment through the [AWS SDK default credential chain](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). In self-hosted Prowler App, these containers have no AWS credentials by default — see [Configuring AWS SDK Default for Self-Hosted Prowler App](/user-guide/providers/aws/authentication#configuring-aws-sdk-default-for-self-hosted-prowler-app) before choosing this option, or the connection test will fail with `InvalidClientTokenId`.
- **Access & Secret Key**: Paste an IAM user's `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` (and optional `AWS_SESSION_TOKEN`) into the form. The IAM principal must be allowed to assume the target role and must match the `IAMPrincipal` parameter of the scan role template (default: `role/prowler*`).
11. Click "Next", then "Launch Scan"
![Next button in Prowler Cloud](/images/providers/next-button-prowler-cloud.png)
+3 -3
View File
@@ -911,11 +911,11 @@ wheels = [
[[package]]
name = "python-dotenv"
version = "1.2.2"
version = "1.1.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/82/ed/0301aeeac3e5353ef3d94b6ec08bbcabd04a72018415dcb29e588514bba8/python_dotenv-1.2.2.tar.gz", hash = "sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3", size = 50135, upload-time = "2026-03-01T16:00:26.196Z" }
sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0b/d7/1959b9648791274998a9c3526f6d0ec8fd2233e4d4acce81bbae76b44b2a/python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a", size = 22101, upload-time = "2026-03-01T16:00:25.09Z" },
{ url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" },
]
[[package]]
+8 -18
View File
@@ -2,31 +2,21 @@
All notable changes to the **Prowler SDK** are documented in this file.
## [5.24.3] (Prowler v5.24.3)
### 🐞 Fixed
- CloudTrail resource timeline uses resource name as fallback in `LookupEvents` [(#10828)](https://github.com/prowler-cloud/prowler/pull/10828)
---
## [5.24.1] (Prowler v5.24.1)
### 🚀 Added
- `--repo-list-file` CLI flag for GitHub provider to load repositories from a file [(#10501)](https://github.com/prowler-cloud/prowler/pull/10501)
## [5.25.0] (Prowler UNRELEASED)
### 🔄 Changed
- `msgraph-sdk` from 1.23.0 to 1.55.0 and `azure-mgmt-resource` from 23.3.0 to 24.0.0, removing `marshmallow` as is a transitively dev dependency [(#10733)](https://github.com/prowler-cloud/prowler/pull/10733)
- Bumped `msgraph-sdk` from 1.23.0 to 1.55.0 and `azure-mgmt-resource` from 23.3.0 to 24.0.0, removing `marshmallow` as it is a transitive dev-only dependency [(#10733)](https://github.com/prowler-cloud/prowler/pull/10733)
---
## [5.24.1] (Prowler UNRELEASED)
### 🐞 Fixed
- Cloudflare account-scoped API tokens failing connection test in the App with `CloudflareUserTokenRequiredError` [(#10723)](https://github.com/prowler-cloud/prowler/pull/10723)
- `prowler image --registry-list` crashes with `AttributeError` because `ImageProvider.__init__` returns early before registering the global provider [(#10691)](https://github.com/prowler-cloud/prowler/pull/10691)
- `prowler image --registry` failing with `ImageNoImagesProvidedError` due to registry arguments not being forwarded to `ImageProvider` in `init_global_provider` [(#10470)](https://github.com/prowler-cloud/prowler/pull/10470)
- Google Workspace Calendar checks false FAIL on unconfigured settings with secure Google defaults [(#10726)](https://github.com/prowler-cloud/prowler/pull/10726)
- Google Workspace Drive checks false FAIL on unconfigured settings with secure Google defaults [(#10727)](https://github.com/prowler-cloud/prowler/pull/10727)
- Cloudflare `validate_credentials` can hang in an infinite pagination loop when the SDK repeats accounts, blocking connection tests [(#10771)](https://github.com/prowler-cloud/prowler/pull/10771)
---
@@ -56,6 +46,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `prowler image --registry-list` crashes with `AttributeError` because `ImageProvider.__init__` returns early before registering the global provider [(#10691)](https://github.com/prowler-cloud/prowler/pull/10691)
- Vercel firewall config handling for team-scoped projects and current API response shapes [(#10695)](https://github.com/prowler-cloud/prowler/pull/10695)
- Google Workspace Drive checks false FAIL on unconfigured settings with secure Google defaults [(#10727)](https://github.com/prowler-cloud/prowler/pull/10727)
---
@@ -106,7 +97,6 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Oracle Cloud `kms_key_rotation_enabled` now checks current key version age to avoid false positives on vaults without auto-rotation support [(#10450)](https://github.com/prowler-cloud/prowler/pull/10450)
- OCI filestorage, blockstorage, KMS, and compute services now honor `--region` for scanning outside the tenancy home region [(#10472)](https://github.com/prowler-cloud/prowler/pull/10472)
- OCI provider now supports multi-region filtering via `--region` [(#10473)](https://github.com/prowler-cloud/prowler/pull/10473)
- `prowler image --registry` failing with `ImageNoImagesProvidedError` due to registry arguments not being forwarded to `ImageProvider` in `init_global_provider` [(#10470)](https://github.com/prowler-cloud/prowler/pull/10470)
- OCI multi-region support for identity client configuration in blockstorage, identity, and filestorage services [(#10520)](https://github.com/prowler-cloud/prowler/pull/10520)
- Google Workspace Calendar checks now filter for customer-level policies only, skipping OU and group overrides that could produce incorrect audit results [(#10658)](https://github.com/prowler-cloud/prowler/pull/10658)
+1 -481
View File
@@ -1,10 +1,9 @@
import json
import os
import sys
from enum import Enum
from typing import Optional, Union
from pydantic.v1 import BaseModel, Field, ValidationError, root_validator
from pydantic.v1 import BaseModel, ValidationError, root_validator
from prowler.lib.check.utils import list_compliance_modules
from prowler.lib.logger import logger
@@ -431,482 +430,3 @@ def load_compliance_framework(
sys.exit(1)
else:
return compliance_framework
# ─── Universal Compliance Schema Models (Phase 1-3) ─────────────────────────
class OutputFormats(BaseModel):
"""Flags indicating in which output formats an attribute should be included."""
csv: bool = True
ocsf: bool = True
class AttributeMetadata(BaseModel):
"""Schema descriptor for a single attribute field in a universal compliance framework."""
key: str
label: Optional[str] = None
type: str = "str" # str, int, float, list_str, list_dict, bool
enum: Optional[list] = None
required: bool = False
enum_display: Optional[dict] = None # enum_value -> EnumValueDisplay dict
enum_order: Optional[list] = None # explicit ordering of enum values
chart_label: Optional[str] = None # axis label when used in charts
output_formats: OutputFormats = Field(default_factory=OutputFormats)
class SplitByConfig(BaseModel):
"""Column-splitting configuration (e.g. CIS Level 1/Level 2)."""
field: str
values: list
class ScoringConfig(BaseModel):
"""Weighted scoring configuration (e.g. ThreatScore)."""
risk_field: str
weight_field: str
class TableLabels(BaseModel):
"""Custom pass/fail labels for console table rendering."""
pass_label: str = "PASS"
fail_label: str = "FAIL"
provider_header: str = "Provider"
group_header: Optional[str] = None
status_header: str = "Status"
title: Optional[str] = None
results_title: Optional[str] = None
footer_note: Optional[str] = None
class TableConfig(BaseModel):
"""Declarative rendering instructions for the console compliance table."""
group_by: str
split_by: Optional[SplitByConfig] = None
scoring: Optional[ScoringConfig] = None
labels: Optional[TableLabels] = None
class EnumValueDisplay(BaseModel):
"""Per-enum-value visual metadata for PDF rendering.
Replaces hardcoded DIMENSION_MAPPING, TIPO_ICONS, nivel colors.
"""
label: Optional[str] = None # "Trazabilidad"
abbreviation: Optional[str] = None # "T"
color: Optional[str] = None # "#4286F4"
icon: Optional[str] = None # emoji
class ChartConfig(BaseModel):
"""Declarative chart description for PDF reports."""
id: str
type: str # vertical_bar | horizontal_bar | radar
group_by: str # attribute key to group by
title: Optional[str] = None
x_label: Optional[str] = None
y_label: Optional[str] = None
value_source: str = "compliance_percent"
color_mode: str = "by_value" # by_value | fixed | by_group
fixed_color: Optional[str] = None
class ScoringFormula(BaseModel):
"""Weighted scoring formula (e.g. ThreatScore)."""
risk_field: str # "LevelOfRisk"
weight_field: str # "Weight"
risk_boost_factor: float = 0.25 # rfac = 1 + factor * risk_level
class CriticalRequirementsFilter(BaseModel):
"""Filter for critical requirements section in PDF reports."""
filter_field: str # "LevelOfRisk"
min_value: Optional[int] = None # 4 (int-based filter)
filter_value: Optional[str] = None # "alto" (string-based filter)
status_filter: str = "FAIL"
title: Optional[str] = None # "Critical Failed Requirements"
class ReportFilter(BaseModel):
"""Default report filtering for PDF generation."""
only_failed: bool = True
include_manual: bool = False
class I18nLabels(BaseModel):
"""Localized labels for PDF report rendering."""
report_title: Optional[str] = None
page_label: str = "Page"
powered_by: str = "Powered by Prowler"
framework_label: str = "Framework:"
version_label: str = "Version:"
provider_label: str = "Provider:"
description_label: str = "Description:"
compliance_score_label: str = "Compliance Score by Sections"
requirements_index_label: str = "Requirements Index"
detailed_findings_label: str = "Detailed Findings"
class PDFConfig(BaseModel):
"""Declarative PDF report configuration.
Drives the API report generator from JSON data instead of hardcoded
Python config. Colors are hex strings (e.g. '#336699').
"""
language: str = "en"
logo_filename: Optional[str] = None
primary_color: Optional[str] = None
secondary_color: Optional[str] = None
bg_color: Optional[str] = None
sections: Optional[list] = None
section_short_names: Optional[dict] = None
group_by_field: Optional[str] = None
sub_group_by_field: Optional[str] = None
section_titles: Optional[dict] = None
charts: Optional[list] = None
scoring: Optional[ScoringFormula] = None
critical_filter: Optional[CriticalRequirementsFilter] = None
filter: Optional[ReportFilter] = None
labels: Optional[I18nLabels] = None
class UniversalComplianceRequirement(BaseModel):
"""Universal requirement with flat dict-based attributes."""
id: str
description: str
name: Optional[str] = None
attributes: dict = Field(default_factory=dict)
checks: dict[str, list[str]] = Field(default_factory=dict)
tactics: Optional[list] = None
sub_techniques: Optional[list] = None
platforms: Optional[list] = None
technique_url: Optional[str] = None
class OutputsConfig(BaseModel):
"""Container for output-related configuration (table, PDF, etc.)."""
table_config: Optional[TableConfig] = None
pdf_config: Optional[PDFConfig] = None
class ComplianceFramework(BaseModel):
"""Universal top-level container for any compliance framework.
Provider may be explicit (single-provider JSON) or derived from checks
keys across all requirements.
"""
framework: str
name: str
provider: Optional[str] = None
version: Optional[str] = None
description: str
icon: Optional[str] = None
requirements: list[UniversalComplianceRequirement]
attributes_metadata: Optional[list[AttributeMetadata]] = None
outputs: Optional[OutputsConfig] = None
@root_validator
# noqa: F841 - vulture flags 'cls' as an unused variable
def validate_attributes_against_metadata(cls, values): # noqa: F841
"""Validate every Requirement's attributes dict against attributes_metadata.
Checks:
- Required keys (required=True) must be present in each Requirement.
- Enum-constrained keys must have a value within the declared enum list.
- Basic type validation (int, float, bool) for non-None values.
"""
metadata = values.get("attributes_metadata")
requirements = values.get("requirements", [])
if not metadata:
return values
required_keys = {m.key for m in metadata if m.required}
valid_keys = {m.key for m in metadata}
enum_map = {m.key: m.enum for m in metadata if m.enum}
type_map = {m.key: m.type for m in metadata}
type_checks = {
"int": int,
"float": (int, float),
"bool": bool,
}
errors = []
for req in requirements:
attrs = req.attributes
# Required keys
for key in required_keys:
if key not in attrs or attrs[key] is None:
errors.append(
f"Requirement '{req.id}': missing required attribute '{key}'"
)
# Unknown keys — anything outside the declared schema is a typo or drift
unknown_keys = set(attrs) - valid_keys
for key in sorted(unknown_keys):
errors.append(
f"Requirement '{req.id}': unknown attribute '{key}' "
f"(not declared in attributes_metadata)"
)
# Enum validation
for key, allowed in enum_map.items():
if key in attrs and attrs[key] is not None:
if attrs[key] not in allowed:
errors.append(
f"Requirement '{req.id}': attribute '{key}' value "
f"'{attrs[key]}' not in {allowed}"
)
# Type validation for non-string types
for key in attrs:
if key not in valid_keys or attrs[key] is None:
continue
expected_type = type_map.get(key, "str")
py_type = type_checks.get(expected_type)
if py_type and not isinstance(attrs[key], py_type):
errors.append(
f"Requirement '{req.id}': attribute '{key}' expected "
f"type {expected_type}, got {type(attrs[key]).__name__}"
)
if errors:
detail = "\n ".join(errors)
raise ValueError(f"attributes_metadata validation failed:\n {detail}")
return values
def get_providers(self) -> list:
"""Derive the set of providers this framework supports.
Inspects checks keys across all requirements. Falls back to the
explicit provider field for single-provider frameworks with no
requirement-level checks.
"""
providers = set()
for req in self.requirements:
providers.update(k.lower() for k in req.checks.keys())
if self.provider and not providers:
providers.add(self.provider.lower())
return sorted(providers)
def supports_provider(self, provider: str) -> bool:
"""Return True if this framework has checks for the given provider."""
provider_lower = provider.lower()
for req in self.requirements:
if any(k.lower() == provider_lower for k in req.checks.keys()):
return True
return self.provider is not None and self.provider.lower() == provider_lower
# ─── Legacy-to-Universal Adapter (Phase 2) ──────────────────────────────────
def _infer_attribute_metadata(legacy: Compliance) -> Optional[list[AttributeMetadata]]:
"""Introspect the first requirement's attribute model to build attributes_metadata."""
try:
if not legacy.Requirements:
return None
first_req = legacy.Requirements[0]
# MITRE requirements have Tactics at top level, not in Attributes
if isinstance(first_req, Mitre_Requirement):
return None
if not first_req.Attributes:
return None
sample_attr = first_req.Attributes[0]
metadata = []
for field_name, field_obj in sample_attr.__fields__.items():
field_type = field_obj.outer_type_
type_str = "str"
enum_values = None
origin = getattr(field_type, "__origin__", None)
if field_type is int:
type_str = "int"
elif field_type is float:
type_str = "float"
elif field_type is bool:
type_str = "bool"
elif origin is list:
args = getattr(field_type, "__args__", ())
if args and args[0] is dict:
type_str = "list_dict"
else:
type_str = "list_str"
elif isinstance(field_type, type) and issubclass(field_type, Enum):
type_str = "str"
enum_values = [e.value for e in field_type]
metadata.append(
AttributeMetadata(
key=field_name,
type=type_str,
enum=enum_values,
required=field_obj.required,
)
)
return metadata
except Exception:
return None
def adapt_legacy_to_universal(legacy: Compliance) -> ComplianceFramework:
"""Convert a legacy Compliance object to a ComplianceFramework."""
universal_requirements = []
legacy_provider_key = legacy.Provider.lower()
for req in legacy.Requirements:
req_checks = {legacy_provider_key: list(req.Checks)} if req.Checks else {}
if isinstance(req, Mitre_Requirement):
# For MITRE, promote special fields and store raw attributes
raw_attrs = [attr.dict() for attr in req.Attributes]
attrs = {"_raw_attributes": raw_attrs}
universal_requirements.append(
UniversalComplianceRequirement(
id=req.Id,
description=req.Description,
name=req.Name,
attributes=attrs,
checks=req_checks,
tactics=req.Tactics,
sub_techniques=req.SubTechniques,
platforms=req.Platforms,
technique_url=req.TechniqueURL,
)
)
else:
# Standard requirement: flatten first attribute to dict
if req.Attributes:
attrs = req.Attributes[0].dict()
else:
attrs = {}
universal_requirements.append(
UniversalComplianceRequirement(
id=req.Id,
description=req.Description,
name=req.Name,
attributes=attrs,
checks=req_checks,
)
)
inferred_metadata = _infer_attribute_metadata(legacy)
return ComplianceFramework(
framework=legacy.Framework,
name=legacy.Name,
provider=legacy.Provider,
version=legacy.Version,
description=legacy.Description,
requirements=universal_requirements,
attributes_metadata=inferred_metadata,
)
def load_compliance_framework_universal(path: str) -> ComplianceFramework:
"""Load a compliance JSON as a ComplianceFramework, handling both new and legacy formats."""
try:
with open(path, "r") as f:
data = json.load(f)
if "attributes_metadata" in data or "requirements" in data:
# New universal format — parse directly
return ComplianceFramework(**data)
else:
# Legacy format — parse as Compliance, then adapt
legacy = Compliance(**data)
return adapt_legacy_to_universal(legacy)
except Exception as e:
logger.error(
f"Failed to load universal compliance framework from {path}: "
f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}"
)
return None
def _load_jsons_from_dir(dir_path: str, provider: str, bulk: dict) -> None:
"""Scan *dir_path* for JSON files and add matching frameworks to *bulk*."""
for filename in os.listdir(dir_path):
file_path = os.path.join(dir_path, filename)
if not (
os.path.isfile(file_path)
and filename.endswith(".json")
and os.stat(file_path).st_size > 0
):
continue
framework_name = filename.split(".json")[0]
if framework_name in bulk:
continue
fw = load_compliance_framework_universal(file_path)
if fw is None:
continue
if fw.provider and fw.provider.lower() == provider.lower():
bulk[framework_name] = fw
elif fw.supports_provider(provider):
bulk[framework_name] = fw
def get_bulk_compliance_frameworks_universal(provider: str) -> dict:
"""Bulk load all compliance frameworks relevant to the given provider.
Scans:
1. The **top-level** ``prowler/compliance/`` directory for multi-provider
JSONs (``Checks`` keyed by provider, no ``Provider`` field).
2. Every **provider sub-directory** (``prowler/compliance/{p}/``) so that
single-provider JSONs are also picked up.
A framework is included when its explicit ``Provider`` matches
(case-insensitive) **or** any requirement has dict-style ``Checks``
with a key for *provider*.
"""
bulk = {}
try:
available_modules = list_compliance_modules()
# Resolve the compliance root once (parent of provider sub-dirs).
compliance_root = None
seen_paths = set()
for module in available_modules:
dir_path = f"{module.module_finder.path}/{module.name.split('.')[-1]}"
if not os.path.isdir(dir_path) or dir_path in seen_paths:
continue
seen_paths.add(dir_path)
# Remember the root the first time we see a valid sub-dir.
if compliance_root is None:
compliance_root = module.module_finder.path
_load_jsons_from_dir(dir_path, provider, bulk)
# Also scan top-level compliance/ for provider-agnostic JSONs.
if compliance_root and os.path.isdir(compliance_root):
_load_jsons_from_dir(compliance_root, provider, bulk)
except Exception as e:
logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}")
return bulk
@@ -135,54 +135,25 @@ class CloudTrailTimeline(TimelineService):
) -> List[Dict[str, Any]]:
"""Query CloudTrail for events related to a specific resource.
CloudTrail's ResourceName attribute is populated per-service by AWS
and is not consistent: KMS and SNS store full ARNs, while S3, IAM,
EC2, Lambda, RDS and others store only the resource name or ID. We
first look up using the identifier as-is, and if no events come back
we retry with the last segment extracted from the ARN.
Uses MaxResults to limit the number of events returned, preparing
for API-level pagination. Currently returns up to max_results events
from the first page only.
"""
client = self._get_client(region)
start_time = datetime.now(timezone.utc) - timedelta(days=self._lookback_days)
events = self._lookup_events_by_name(client, resource_identifier, start_time)
if not events and resource_identifier.startswith("arn:"):
short_name = self._extract_short_name(resource_identifier)
if short_name and short_name != resource_identifier:
logger.debug(
f"CloudTrail timeline: no events for '{resource_identifier}', "
f"retrying lookup with short name '{short_name}'"
)
events = self._lookup_events_by_name(client, short_name, start_time)
return events
def _lookup_events_by_name(
self, client, resource_name: str, start_time: datetime
) -> List[Dict[str, Any]]:
# Use direct API call with MaxResults instead of paginator
# This limits CloudTrail to return only max_results events
response = client.lookup_events(
LookupAttributes=[
{"AttributeKey": "ResourceName", "AttributeValue": resource_name}
{"AttributeKey": "ResourceName", "AttributeValue": resource_identifier}
],
StartTime=start_time,
MaxResults=self._max_results,
)
return response.get("Events", [])
@staticmethod
def _extract_short_name(identifier: str) -> str:
"""Return the last segment of an ARN or identifier.
ARNs take the form `arn:partition:service:region:account:resource-info`
where resource-info is one of `name`, `type/name`, or `type:name`.
Splitting on the final `/` and then the final `:` yields the value
CloudTrail stores for most services: S3 bucket name, IAM user/role
name, EC2 resource ID, Lambda function name, RDS DB identifier, etc.
"""
if not identifier:
return identifier
return identifier.rsplit("/", 1)[-1].rsplit(":", 1)[-1]
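As a quick standalone check of the splitting rule described in the docstring (this mirrors the helper above and is not part of the diff):

```python
def extract_short_name(identifier: str) -> str:
    # Same logic as the helper: last "/" segment, then last ":" segment.
    return identifier.rsplit("/", 1)[-1].rsplit(":", 1)[-1] if identifier else identifier

assert extract_short_name("arn:aws:s3:::my-bucket") == "my-bucket"
assert extract_short_name("arn:aws:iam::123456789012:role/MyRole") == "MyRole"
assert extract_short_name("arn:aws:lambda:us-east-1:123456789012:function:my-fn") == "my-fn"
```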
def _parse_event(self, raw_event: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""Parse a raw CloudTrail event into a TimelineEvent dictionary."""
try:
@@ -274,12 +274,8 @@ class CloudflareProvider(Provider):
for account in client.accounts.list():
account_id = getattr(account, "id", None)
# Prevent infinite loop on repeated pages from the SDK paginator
# Prevent infinite loop - skip if we've seen this account
if account_id in seen_account_ids:
logger.warning(
"Detected repeated Cloudflare account ID while listing accounts. "
"Stopping pagination to avoid an infinite loop."
)
break
seen_account_ids.add(account_id)
@@ -399,20 +395,7 @@ class CloudflareProvider(Provider):
# Fallback: try accounts.list()
try:
accounts: list = []
seen_account_ids: set = set()
for account in client.accounts.list():
account_id = getattr(account, "id", None)
# Prevent infinite loop on repeated pages from the SDK paginator
if account_id in seen_account_ids:
logger.warning(
"Detected repeated Cloudflare account ID while validating credentials. "
"Stopping pagination to avoid an infinite loop."
)
break
seen_account_ids.add(account_id)
accounts.append(account)
accounts = list(client.accounts.list())
if not accounts:
logger.error("CloudflareNoAccountsError: No accounts found")
raise CloudflareNoAccountsError(
-1
View File
@@ -280,7 +280,6 @@ class Provider(ABC):
mutelist_path=arguments.mutelist_file,
config_path=arguments.config_file,
repositories=repos,
repo_list_file=getattr(arguments, "repo_list_file", None),
organizations=orgs,
)
elif "googleworkspace" in provider_class_name.lower():
@@ -34,14 +34,6 @@ class GithubBaseException(ProwlerException):
"message": "The provided provider ID does not match with the authenticated user or accessible organizations",
"remediation": "Check the provider ID and ensure it matches the authenticated user or an organization you have access to.",
},
(5007, "GithubRepoListFileNotFoundError"): {
"message": "The repo list file was not found",
"remediation": "Check the file path and ensure it exists.",
},
(5008, "GithubRepoListFileReadError"): {
"message": "Error reading the repo list file",
"remediation": "Check the file permissions and format.",
},
}
def __init__(self, code, file=None, original_exception=None, message=None):
@@ -112,21 +104,3 @@ class GithubInvalidProviderIdError(GithubCredentialsError):
super().__init__(
5006, file=file, original_exception=original_exception, message=message
)
class GithubRepoListFileNotFoundError(GithubBaseException):
"""Exception raised when the repo list file is not found."""
def __init__(self, file=None, original_exception=None, message=None):
super().__init__(
5007, file=file, original_exception=original_exception, message=message
)
class GithubRepoListFileReadError(GithubBaseException):
"""Exception raised when the repo list file cannot be read."""
def __init__(self, file=None, original_exception=None, message=None):
super().__init__(
5008, file=file, original_exception=original_exception, message=message
)
@@ -22,8 +22,6 @@ from prowler.providers.github.exceptions.exceptions import (
GithubInvalidCredentialsError,
GithubInvalidProviderIdError,
GithubInvalidTokenError,
GithubRepoListFileNotFoundError,
GithubRepoListFileReadError,
GithubSetUpIdentityError,
GithubSetUpSessionError,
)
@@ -92,8 +90,6 @@ class GithubProvider(Provider):
_type: str = "github"
_auth_method: str = None
MAX_REPO_LIST_LINES: int = 10_000
MAX_REPO_NAME_LENGTH: int = 500
_session: GithubSession
_identity: GithubIdentityInfo
_audit_config: dict
@@ -117,7 +113,6 @@ class GithubProvider(Provider):
mutelist_path: str = None,
mutelist_content: dict = None,
repositories: list = None,
repo_list_file: str = None,
organizations: list = None,
):
"""
@@ -135,7 +130,6 @@ class GithubProvider(Provider):
mutelist_path (str): Path to the mutelist file.
mutelist_content (dict): Mutelist content.
repositories (list): List of repository names to scan in 'owner/repo-name' format.
repo_list_file (str): Path to a file containing repository names (one per line).
organizations (list): List of organization or user names to scan repositories for.
"""
logger.info("Instantiating GitHub Provider...")
@@ -153,10 +147,6 @@ class GithubProvider(Provider):
else:
self._repositories = list(repositories)
# Load repos from file if provided
if repo_list_file:
self._load_repos_from_file(repo_list_file)
if organizations is None:
self._organizations = []
elif isinstance(organizations, str):
@@ -266,46 +256,6 @@ class GithubProvider(Provider):
"""
return self._organizations
def _load_repos_from_file(self, file_path: str) -> None:
"""Load repository names from a file (one per line)."""
try:
repo_count = 0
before = len(self._repositories)
with open(file_path, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
repo_count += 1
if repo_count > self.MAX_REPO_LIST_LINES:
raise GithubRepoListFileReadError(
file=file_path,
message=f"Repo list file exceeds maximum of {self.MAX_REPO_LIST_LINES} lines.",
)
if len(line) > self.MAX_REPO_NAME_LENGTH:
logger.warning(
f"Skipping repo name exceeding {self.MAX_REPO_NAME_LENGTH} chars at line {repo_count} in {file_path}"
)
continue
self._repositories.append(line)
self._repositories = list(dict.fromkeys(self._repositories))
logger.info(
f"Loaded {len(self._repositories) - before} repositories from {file_path}"
)
except FileNotFoundError:
raise GithubRepoListFileNotFoundError(
file=file_path,
message=f"Repo list file not found: {file_path}",
)
except (GithubRepoListFileReadError, GithubRepoListFileNotFoundError):
raise
except Exception as error:
raise GithubRepoListFileReadError(
file=file_path,
original_exception=error,
message=f"Error reading repo list file: {error}",
)
@staticmethod
def setup_session(
personal_access_token: str = None,
@@ -50,12 +50,6 @@ def init_parser(self):
default=None,
metavar="REPOSITORY",
)
github_scoping_subparser.add_argument(
"--repo-list-file",
dest="repo_list_file",
default=None,
help="Path to a file containing a list of repositories to scan (one per line in 'owner/repo-name' format). Lines starting with # are treated as comments.",
)
github_scoping_subparser.add_argument(
"--organization",
"--organizations",
+10 -568
View File
@@ -1,28 +1,16 @@
---
name: prowler-compliance
description: >
Creates, syncs, audits and manages Prowler compliance frameworks end-to-end.
Covers the four-layer architecture (SDK models → JSON catalogs → output
formatters → API/UI), upstream sync workflows, cloud-auditor check-mapping
reviews, output formatter creation, and framework-specific attribute models.
Trigger: When working with compliance frameworks (CIS, NIST, PCI-DSS, SOC2,
GDPR, ISO27001, ENS, MITRE ATT&CK, CCC, C5, CSA CCM, KISA ISMS-P,
Prowler ThreatScore, FedRAMP, HIPAA), syncing with upstream catalogs,
auditing check-to-requirement mappings, adding output formatters, or fixing
compliance JSON bugs (duplicate IDs, empty Version, wrong Section, stale
check refs).
Creates and manages Prowler compliance frameworks.
Trigger: When working with compliance frameworks (CIS, NIST, PCI-DSS, SOC2, GDPR, ISO27001, ENS, MITRE ATT&CK).
license: Apache-2.0
metadata:
author: prowler-cloud
version: "1.2"
version: "1.1"
scope: [root, sdk]
auto_invoke:
- "Creating/updating compliance frameworks"
- "Mapping checks to compliance controls"
- "Syncing compliance framework with upstream catalog"
- "Auditing check-to-requirement mappings as a cloud auditor"
- "Adding a compliance output formatter (per-provider class + table dispatcher)"
- "Fixing compliance JSON bugs (duplicate IDs, empty Section, stale refs)"
allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task
---
@@ -30,82 +18,10 @@ allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task
Use this skill when:
- Creating a new compliance framework for any provider
- **Syncing an existing framework with an upstream source of truth** (CIS, FINOS CCC, CSA CCM, NIST, ENS, etc.)
- Adding requirements to existing frameworks
- Mapping checks to compliance controls
- **Auditing existing check mappings as a cloud auditor** (user asks "are these mappings correct?", "which checks apply to this requirement?", "review the mappings")
- **Adding a new output formatter** (new framework needs a table dispatcher + per-provider classes + CSV models)
- **Fixing JSON bugs**: duplicate IDs, empty Version, wrong Section, stale check refs, inconsistent FamilyName, padded tangential check mappings
- **Registering a framework in the CLI table dispatcher or API export map**
- Investigating why a finding/check isn't showing under the expected compliance framework in the UI
- Understanding compliance framework structures and attributes
## Four-Layer Architecture (Mental Model)
Prowler compliance is a **four-layer system** hanging off one Pydantic model tree. Bugs usually happen where one layer doesn't match another, so know all four before touching anything.
### Layer 1: SDK / Core Models — `prowler/lib/check/`
- **`compliance_models.py`** — Pydantic **v1** model tree (`from pydantic.v1 import`). One `*_Requirement_Attribute` class per framework type + `Generic_Compliance_Requirement_Attribute` as fallback.
- `Compliance_Requirement.Attributes: list[Union[...]]` — **`Generic_Compliance_Requirement_Attribute` MUST be LAST** in the Union or every framework-specific attribute falls through to Generic (Pydantic v1 tries union members in order).
- **`compliance.py`** — runtime linker. `get_check_compliance()` builds the key as `f"{Framework}-{Version}"` **only if `Version` is non-empty**. An empty Version makes the key just `"{Framework}"` — this breaks downstream filters and tests that expect the versioned key (see the sketch after this list).
- `Compliance.get_bulk(provider)` walks `prowler/compliance/{provider}/` and parses every `.json` file. No central index — just directory scan.
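To make the key rule concrete, here is a minimal sketch of the `"{Framework}-{Version}"` construction described above; it illustrates the rule only and is not the actual `get_check_compliance()` code in `prowler/lib/check/compliance.py`.
```python
# Hypothetical illustration of the key rule described above, not the real implementation.
def compliance_key(framework: str, version: str) -> str:
    # Non-empty Version -> "CIS-5.0"; empty Version -> just "CIS",
    # which is why shipping Version: "" silently breaks downstream filters and tests.
    return f"{framework}-{version}" if version else framework

assert compliance_key("CIS", "5.0") == "CIS-5.0"
assert compliance_key("CCC", "") == "CCC"  # the failure mode to avoid
```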
### Layer 2: JSON Frameworks — `prowler/compliance/{provider}/`
See "Compliance Framework Location" and "Framework-Specific Attribute Structures" sections below.
### Layer 3: Output Formatters — `prowler/lib/outputs/compliance/{framework}/`
**Every framework directory follows this exact convention** — do not deviate:
```
{framework}/
├── __init__.py
├── {framework}.py # ONLY get_{framework}_table() — NO function docstring
├── {framework}_{provider}.py # One class per provider (e.g., CCC_AWS, CCC_Azure, CCC_GCP)
└── models.py # One Pydantic v2 BaseModel per provider (CSV columns)
```
- **`{framework}.py`** holds the **table dispatcher function** `get_{framework}_table()`. It prints the pass/fail/muted summary table. **Must NOT import `Finding` or `ComplianceOutput`** — doing so creates a circular import with `prowler/lib/outputs/compliance/compliance.py`. Only imports: `colorama`, `tabulate`, `prowler.config.config.orange_color`.
- **`{framework}_{provider}.py`** holds a per-provider class like `CCC_AWS(ComplianceOutput)` with a `transform()` method that walks findings and emits rows. This file IS allowed to import `Finding` because it's not on the dispatcher import chain.
- **`models.py`** holds one Pydantic v2 `BaseModel` per provider. Field names become CSV column headers (**public API** — renaming breaks downstream consumers).
- **Never collapse per-provider files into a unified parameterized class**, even when DRY-tempting. Every framework in Prowler follows the per-provider file pattern and reviewers will reject the refactor. CSV columns differ per provider (`AccountId`/`Region` vs `SubscriptionId`/`Location` vs `ProjectId`/`Location`) — three classes is the convention.
- **No function docstring on `get_{framework}_table()`** — no other framework has one; stay consistent.
- Register in `prowler/lib/outputs/compliance/compliance.py` → `display_compliance_table()` with an `elif compliance_framework.startswith("{framework}_"):` branch. Import the table function at the top of the file.
### Layer 4: API / UI
- **API table dispatcher**: `api/src/backend/tasks/jobs/export.py` → `COMPLIANCE_CLASS_MAP` keyed by provider. Uses `startswith` predicates: `(lambda name: name.startswith("ccc_"), CCC_AWS)`. **Never use exact match** (`name == "ccc_aws"`) — it's inconsistent and breaks versioning.
- **API lazy loader**: `api/src/backend/api/compliance.py` → `LazyComplianceTemplate` and `LazyChecksMapping` load compliance per provider on first access.
- **UI mapper routing**: `ui/lib/compliance/compliance-mapper.ts` routes framework names → per-framework mapper.
- **UI per-framework mapper**: `ui/lib/compliance/{framework}.tsx` flattens `Requirements` into a 3-level tree (Framework → Category → Control → Requirement) for the accordion view. Groups by `Attributes[0].FamilyName` and `Attributes[0].Section`.
- **UI detail panel**: `ui/components/compliance/compliance-custom-details/{framework}-details.tsx`.
- **UI types**: `ui/types/compliance.ts` — TypeScript mirrors of the attribute metadata.
### The CLI Pipeline (end-to-end)
```
prowler aws --compliance ccc_aws
Compliance.get_bulk("aws") → parses prowler/compliance/aws/*.json
update_checks_metadata_with_compliance() → attaches compliance info to CheckMetadata
execute_checks() → runs checks, produces Finding objects
get_check_compliance(finding, "aws", bulk_checks_metadata)
→ dict "{Framework}-{Version}" → [requirement_ids]
CCC_AWS(findings, compliance).transform() → per-provider class builds CSV rows
batch_write_data_to_file() → writes {output_filename}_ccc_aws.csv
display_compliance_table() → get_ccc_table() → prints stdout summary
```
---
## Compliance Framework Location
Frameworks are JSON files located in: `prowler/compliance/{provider}/{framework_name}_{provider}.json`
@@ -539,453 +455,14 @@ Prowler ThreatScore is a custom security scoring framework developed by Prowler
- **M365:** `cis_4.0_m365.json`, `iso27001_2022_m365.json`
- **NHN:** `iso27001_2022_nhn.json`
## Workflow A: Sync a Framework With an Upstream Catalog
Use when the framework is maintained upstream (CIS Benchmarks, FINOS CCC, CSA CCM, NIST, ENS, etc.) and Prowler needs to catch up.
### Step 1 — Cache the upstream source
Download every upstream file to a local cache so subsequent iterations don't hit the network. For FINOS CCC:
```bash
mkdir -p /tmp/ccc_upstream
catalogs="core/ccc storage/object management/auditlog management/logging ..."
for p in $catalogs; do
safe=$(echo "$p" | tr '/' '_')
gh api "repos/finos/common-cloud-controls/contents/catalogs/$p/controls.yaml" \
-H "Accept: application/vnd.github.raw" > "/tmp/ccc_upstream/${safe}.yaml"
done
```
### Step 2 — Run the generic sync runner against a framework config
The sync tooling is split into three layers so adding a new framework only takes a YAML config (and optionally a new parser module for an unfamiliar upstream format):
```
skills/prowler-compliance/assets/
├── sync_framework.py # generic runner — works for any framework
├── configs/
│ └── ccc.yaml # per-framework config (canonical example)
└── parsers/
├── __init__.py
└── finos_ccc.py # parser module for FINOS CCC YAML
```
**For frameworks that already have a config + parser** (today: FINOS CCC), run:
```bash
python skills/prowler-compliance/assets/sync_framework.py \
skills/prowler-compliance/assets/configs/ccc.yaml
```
The runner loads the config, validates it, dynamically imports the parser declared in `parser.module`, calls `parser.parse_upstream(config) -> list[dict]`, then applies generic post-processing (id uniqueness safety net, `FamilyName` normalization, legacy check-mapping preservation) and writes the provider JSONs.
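To make that division of labor concrete, a rough sketch of the runner's control flow follows; it assumes the config keys and parser contract documented below, and the import path and function names are illustrative rather than the actual `sync_framework.py` internals.
```python
# Rough sketch of the runner described above; names and import path are illustrative.
import importlib
import sys

import yaml

def run_sync(config_path: str) -> None:
    with open(config_path) as f:
        config = yaml.safe_load(f)

    # Fail fast on an empty framework.version: it would silently break the
    # "{Framework}-{Version}" key downstream, so refuse to start.
    if not config["framework"].get("version"):
        sys.exit("framework.version must not be empty")

    # Dynamically import the parser module declared under parser.module.
    parser = importlib.import_module(f"parsers.{config['parser']['module']}")
    requirements = parser.parse_upstream(config)  # list[dict] with unique Ids

    # Generic post-processing (id uniqueness safety net, FamilyName normalization,
    # legacy check-mapping preservation) and per-provider JSON writes happen here.
    ...
```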
**To add a new framework sync**:
1. **Write a config file** at `skills/prowler-compliance/assets/configs/{framework}.yaml`. See `configs/ccc.yaml` as the canonical example. Required top-level sections:
- `framework` → `name`, `display_name`, `version` (**never empty** — empty Version silently breaks `get_check_compliance()` key construction, so the runner refuses to start), `description_template` (accepts `{provider_display}`, `{provider_key}`, `{framework_name}`, `{framework_display}`, `{version}` placeholders).
- `providers` — list of `{key, display}` pairs, one per Prowler provider the framework targets.
- `output.path_template` — supports `{provider}`, `{framework}`, `{version}` placeholders. Examples: `"prowler/compliance/{provider}/ccc_{provider}.json"` for unversioned file names, `"prowler/compliance/{provider}/cis_{version}_{provider}.json"` for versioned ones.
- `upstream.dir` — local cache directory (populate via Step 1).
- `parser.module` — name of the module under `parsers/` to load (without `.py`). Everything else under `parser.` is opaque to the runner and passed to the parser as config.
- `post_processing.check_preservation.primary_key` — top-level field name for the primary legacy-mapping lookup (almost always `Id`).
- `post_processing.check_preservation.fallback_keys` — **config-driven fallback keys** for preserving check mappings when ids change. Each entry is a list of `Attributes[0]` field names composed into a tuple. Examples:
- CCC: `- [Section, Applicability]` (because `Applicability` is a CCC-only attribute, verified in `compliance_models.py:213`).
- CIS would use `- [Section, Profile]`.
- NIST would use `- [ItemId]`.
- List-valued fields (like `Applicability`) are automatically frozen to `frozenset` so the tuple is hashable.
- `post_processing.family_name_normalization` (optional) — map of raw → canonical `FamilyName` values. The UI groups by `Attributes[0].FamilyName` exactly, so inconsistent upstream variants otherwise become separate tree branches.
2. **Reuse an existing parser** if the upstream format matches one (currently only `finos_ccc` exists). Otherwise, **write a new parser** at `parsers/{name}.py` implementing:
```python
def parse_upstream(config: dict) -> list[dict]:
"""Return Prowler-format requirements {Id, Description, Attributes: [...], Checks: []}.
Ids MUST be unique in the returned list. The runner raises ValueError
on duplicates — it does NOT silently renumber, because mutating a
canonical upstream id (e.g. CIS '1.1.1' or NIST 'AC-2(1)') would be
catastrophic. The parser owns all upstream-format quirks: foreign-prefix
rewriting, genuine collision renumbering, shape handling.
"""
```
The parser reads its own settings from `config['upstream']` and `config['parser']`. It does NOT load existing Prowler JSONs (the runner does that for check preservation) and does NOT write output (the runner does that too).
**Gotchas the runner already handles for you** (learned from the FINOS CCC v2025.10 sync — they're documented here so you don't re-discover them):
- **Multiple upstream YAML shapes**. Most FINOS CCC catalogs use `control-families: [...]`, but `storage/object` uses a top-level `controls: [...]` with a `family: "CCC.X.Y"` reference id and no human-readable family name. A parser that only handles shape 1 silently drops the shape-2 catalog — this exact bug dropped ObjStor from Prowler for a full iteration. `parsers/finos_ccc.py` handles both shapes; if you write a new parser for a similar format, test with at least one file of each shape.
- **Whitespace collapse**. Upstream YAML multi-line block scalars (`|`) preserve newlines. Prowler stores descriptions single-line. Collapse with `" ".join(value.split())` before emitting (see `parsers/finos_ccc.py::clean()`).
- **Foreign-prefix AR id rewriting**. Upstream sometimes aliases requirements across catalogs by keeping the original prefix (e.g., `CCC.AuditLog.CN08.AR01` appears nested under `CCC.Logging.CN03`). Rewrite the foreign id to fit its parent control: `CCC.Logging.CN03.AR01`. This logic is parser-specific because the id structure varies per framework (CCC uses 3-dot depth; CIS uses numeric dots; NIST uses `AC-2(1)`).
- **Genuine upstream collision renumbering**. Sometimes upstream has a real typo where two different requirements share the same id (e.g., `CCC.Core.CN14.AR02` defined twice for 30-day and 14-day backup variants). Renumber the second copy to the next free AR number (`.AR03`). The parser handles this; the runner asserts the final list has unique ids as a safety net.
- **Existing check mapping preservation**. The runner uses the `primary_key` + `fallback_keys` declared in config to look up the old `Checks` list for each requirement. For CCC this means primary index by `Id` plus fallback index by `(Section, frozenset(Applicability))` — the fallback recovers mappings for requirements whose ids were rewritten or renumbered by the parser (see the sketch after this list).
- **FamilyName normalization**. Configured via `post_processing.family_name_normalization` — no code changes needed to collapse upstream variants like `"Logging & Monitoring"` → `"Logging and Monitoring"`.
- **Populate `Version`**. The runner refuses to start on empty `framework.version` — fail-fast replaces the silent bug where `get_check_compliance()` would build the key as just `"{Framework}"`.
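The legacy check-mapping preservation mentioned above is the easiest piece to get subtly wrong, so here is an illustrative sketch of the primary/fallback lookup; the field names follow this document's CCC example, and the real logic lives in `sync_framework.py`.
```python
# Illustrative sketch of primary-by-Id plus fallback-by-(Section, Applicability) lookup.
def build_indexes(old_requirements: list[dict]) -> tuple[dict, dict]:
    by_id = {req["Id"]: req.get("Checks", []) for req in old_requirements}
    by_fallback = {
        (
            req["Attributes"][0].get("Section"),
            frozenset(req["Attributes"][0].get("Applicability") or []),
        ): req.get("Checks", [])
        for req in old_requirements
    }
    return by_id, by_fallback

def preserve_checks(new_req: dict, by_id: dict, by_fallback: dict) -> list[str]:
    # Primary lookup by Id; the fallback recovers mappings for rewritten/renumbered ids.
    if new_req["Id"] in by_id:
        return by_id[new_req["Id"]]
    key = (
        new_req["Attributes"][0].get("Section"),
        frozenset(new_req["Attributes"][0].get("Applicability") or []),
    )
    return by_fallback.get(key, [])
```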
### Step 3 — Validate before committing
```python
from prowler.lib.check.compliance_models import Compliance
for prov in ['aws', 'azure', 'gcp']:
c = Compliance.parse_file(f"prowler/compliance/{prov}/ccc_{prov}.json")
print(f"{prov}: {len(c.Requirements)} reqs, version={c.Version}")
```
Any `ValidationError` means the Attribute fields don't match the `*_Requirement_Attribute` model. Either fix the JSON or extend the model in `compliance_models.py` (remember: Generic stays last).
### Step 4 — Verify every check id exists
```python
import json
from pathlib import Path
for prov in ['aws', 'azure', 'gcp']:
existing = {p.stem.replace('.metadata','')
for p in Path(f'prowler/providers/{prov}/services').rglob('*.metadata.json')}
with open(f'prowler/compliance/{prov}/ccc_{prov}.json') as f:
data = json.load(f)
refs = {c for r in data['Requirements'] for c in r['Checks']}
missing = refs - existing
assert not missing, f"{prov} missing: {missing}"
```
A stale check id silently becomes dead weight — no finding will ever map to it. This pre-validation **must run on every write**; bake it into the generator script.
### Step 5 — Add an attribute model if needed
Only if the framework has fields beyond `Generic_Compliance_Requirement_Attribute`. Add the class to `prowler/lib/check/compliance_models.py` and register it in `Compliance_Requirement.Attributes: list[Union[...]]`. **Generic stays last.**
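For orientation, a hedged sketch of what that registration looks like; the class and field names below are hypothetical, and only the Union ordering rule is the point.
```python
# Hypothetical attribute model registration; only the "Generic stays last" rule matters.
from typing import List, Optional, Union

from pydantic.v1 import BaseModel

class MyFramework_Requirement_Attribute(BaseModel):
    Section: str
    FamilyName: Optional[str]

class Generic_Compliance_Requirement_Attribute(BaseModel):
    # Stand-in for the real fallback model in compliance_models.py.
    Section: Optional[str]

class Compliance_Requirement(BaseModel):
    Attributes: List[
        Union[
            MyFramework_Requirement_Attribute,
            # ...other framework-specific attribute models...
            Generic_Compliance_Requirement_Attribute,  # MUST stay last: v1 tries members in order
        ]
    ]
```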
---
## Workflow B: Audit Check Mappings as a Cloud Auditor
Use when the user asks to review existing mappings ("are these correct?", "verify that the checks apply", "audit the CCC mappings"). This is the highest-value compliance task — it surfaces padded mappings with zero actual coverage and missing mappings for legitimate coverage.
### The golden rule
> A Prowler check's title/risk MUST **literally describe what the requirement text says**. "Related" is not enough. If no check actually addresses the requirement, leave `Checks: []` (MANUAL) — **honest MANUAL is worth more than padded coverage**.
### Audit process
**Step 1 — Build a per-provider check inventory** (cache in `/tmp/`):
```python
import json
from pathlib import Path
for provider in ['aws', 'azure', 'gcp']:
inv = {}
for meta in Path(f'prowler/providers/{provider}/services').rglob('*.metadata.json'):
with open(meta) as f:
d = json.load(f)
cid = d.get('CheckID') or meta.stem.replace('.metadata','')
inv[cid] = {
'service': d.get('ServiceName', ''),
'title': d.get('CheckTitle', ''),
'risk': d.get('Risk', ''),
'description': d.get('Description', ''),
}
with open(f'/tmp/checks_{provider}.json', 'w') as f:
json.dump(inv, f, indent=2)
```
**Step 2 — Keyword/service query helper** — see [assets/query_checks.py](assets/query_checks.py):
```bash
python assets/query_checks.py aws encryption transit # keyword AND-search
python assets/query_checks.py aws --service iam # all iam checks
python assets/query_checks.py aws --id kms_cmk_rotation_enabled # full metadata
```
**Step 3 — Dump a framework section with current mappings** — see [assets/dump_section.py](assets/dump_section.py):
```bash
python assets/dump_section.py ccc "CCC.Core." # all Core ARs across 3 providers
python assets/dump_section.py ccc "CCC.AuditLog." # all AuditLog ARs
```
**Step 4 — Encode explicit REPLACE decisions** — see [assets/audit_framework_template.py](assets/audit_framework_template.py). Structure:
```python
DECISIONS = {}
DECISIONS["CCC.Core.CN01.AR01"] = {
"aws": [
"cloudfront_distributions_https_enabled",
"cloudfront_distributions_origin_traffic_encrypted",
# ...
],
"azure": [
"storage_secure_transfer_required_is_enabled",
"app_minimum_tls_version_12",
# ...
],
"gcp": [
"cloudsql_instance_ssl_connections",
],
# Missing provider key = leave the legacy mapping untouched
}
# Empty list = EXPLICITLY MANUAL (overwrites legacy)
DECISIONS["CCC.Core.CN01.AR07"] = {
"aws": [], # Prowler has no IANA port/protocol check
"azure": [],
"gcp": [],
}
```
**REPLACE, not PATCH.** Encoding every mapping as a full list (not add/remove delta) makes the audit reproducible and surfaces hidden assumptions from the legacy data.
**Step 5 — Pre-validation**. The audit script MUST validate every check id against the inventory and **abort with stderr listing typos**. Common typos caught during a real audit:
- `fsx_file_system_encryption_at_rest_using_kms` (doesn't exist)
- `cosmosdb_account_encryption_at_rest_with_cmk` (doesn't exist)
- `sqlserver_geo_replication` (doesn't exist)
- `redshift_cluster_audit_logging` (should be `redshift_cluster_encrypted_at_rest`)
- `postgresql_flexible_server_require_secure_transport` (should be `postgresql_flexible_server_enforce_ssl_enabled`)
- `storage_secure_transfer_required_enabled` (should be `storage_secure_transfer_required_is_enabled`)
- `sqlserver_minimum_tls_version_12` (should be `sqlserver_recommended_minimal_tls_version`)
**Step 6 — Apply + validate + test**:
```bash
python /path/to/audit_script.py # applies decisions, pre-validates
python -m pytest tests/lib/outputs/compliance/ tests/lib/check/ -q
```
### Audit Reference Table: Requirement Text → Prowler Checks
Use this table to map CCC-style / NIST-style / ISO-style requirements to the checks that actually verify them. Built from a real audit of 172 CCC ARs × 3 providers.
| Requirement text | AWS checks | Azure checks | GCP checks |
|---|---|---|---|
| **TLS in transit enforced** | `cloudfront_distributions_https_enabled`, `s3_bucket_secure_transport_policy`, `elbv2_ssl_listeners`, `elbv2_insecure_ssl_ciphers`, `elb_ssl_listeners`, `elb_insecure_ssl_ciphers`, `opensearch_service_domains_https_communications_enforced`, `rds_instance_transport_encrypted`, `redshift_cluster_in_transit_encryption_enabled`, `elasticache_redis_cluster_in_transit_encryption_enabled`, `dynamodb_accelerator_cluster_in_transit_encryption_enabled`, `dms_endpoint_ssl_enabled`, `kafka_cluster_in_transit_encryption_enabled`, `transfer_server_in_transit_encryption_enabled`, `glue_database_connections_ssl_enabled`, `sns_subscription_not_using_http_endpoints` | `storage_secure_transfer_required_is_enabled`, `storage_ensure_minimum_tls_version_12`, `postgresql_flexible_server_enforce_ssl_enabled`, `mysql_flexible_server_ssl_connection_enabled`, `mysql_flexible_server_minimum_tls_version_12`, `sqlserver_recommended_minimal_tls_version`, `app_minimum_tls_version_12`, `app_ensure_http_is_redirected_to_https`, `app_ftp_deployment_disabled` | `cloudsql_instance_ssl_connections` (almost only option) |
| **TLS 1.3 specifically** | Partial: `cloudfront_distributions_using_deprecated_ssl_protocols`, `elb*_insecure_ssl_ciphers`, `*_minimum_tls_version_12` | Partial: `*_minimum_tls_version_12` checks | None — accept as MANUAL |
| **SSH / port 22 hardening** | `ec2_instance_port_ssh_exposed_to_internet`, `ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22`, `ec2_networkacl_allow_ingress_tcp_port_22` | `network_ssh_internet_access_restricted`, `vm_linux_enforce_ssh_authentication` | `compute_firewall_ssh_access_from_the_internet_allowed`, `compute_instance_block_project_wide_ssh_keys_disabled`, `compute_project_os_login_enabled`, `compute_project_os_login_2fa_enabled` |
| **mTLS (mutual TLS)** | `kafka_cluster_mutual_tls_authentication_enabled`, `apigateway_restapi_client_certificate_enabled` | `app_client_certificates_on` | None — MANUAL |
| **Data at rest encrypted** | `s3_bucket_default_encryption`, `s3_bucket_kms_encryption`, `ec2_ebs_default_encryption`, `ec2_ebs_volume_encryption`, `rds_instance_storage_encrypted`, `rds_cluster_storage_encrypted`, `rds_snapshots_encrypted`, `dynamodb_tables_kms_cmk_encryption_enabled`, `redshift_cluster_encrypted_at_rest`, `neptune_cluster_storage_encrypted`, `documentdb_cluster_storage_encrypted`, `opensearch_service_domains_encryption_at_rest_enabled`, `kinesis_stream_encrypted_at_rest`, `firehose_stream_encrypted_at_rest`, `sns_topics_kms_encryption_at_rest_enabled`, `sqs_queues_server_side_encryption_enabled`, `efs_encryption_at_rest_enabled`, `athena_workgroup_encryption`, `glue_data_catalogs_metadata_encryption_enabled`, `backup_vaults_encrypted`, `backup_recovery_point_encrypted`, `cloudtrail_kms_encryption_enabled`, `cloudwatch_log_group_kms_encryption_enabled`, `eks_cluster_kms_cmk_encryption_in_secrets_enabled`, `sagemaker_notebook_instance_encryption_enabled`, `apigateway_restapi_cache_encrypted`, `kafka_cluster_encryption_at_rest_uses_cmk`, `dynamodb_accelerator_cluster_encryption_enabled`, `storagegateway_fileshare_encryption_enabled` | `storage_infrastructure_encryption_is_enabled`, `storage_ensure_encryption_with_customer_managed_keys`, `vm_ensure_attached_disks_encrypted_with_cmk`, `vm_ensure_unattached_disks_encrypted_with_cmk`, `sqlserver_tde_encryption_enabled`, `sqlserver_tde_encrypted_with_cmk`, `databricks_workspace_cmk_encryption_enabled`, `monitor_storage_account_with_activity_logs_cmk_encrypted` | `compute_instance_encryption_with_csek_enabled`, `dataproc_encrypted_with_cmks_disabled`, `bigquery_dataset_cmk_encryption`, `bigquery_table_cmk_encryption` |
| **CMEK required (customer-managed keys)** | `kms_cmk_are_used` | `storage_ensure_encryption_with_customer_managed_keys`, `vm_ensure_attached_disks_encrypted_with_cmk`, `vm_ensure_unattached_disks_encrypted_with_cmk`, `sqlserver_tde_encrypted_with_cmk`, `databricks_workspace_cmk_encryption_enabled` | `bigquery_dataset_cmk_encryption`, `bigquery_table_cmk_encryption`, `dataproc_encrypted_with_cmks_disabled`, `compute_instance_encryption_with_csek_enabled` |
| **Key rotation enabled** | `kms_cmk_rotation_enabled` | `keyvault_key_rotation_enabled`, `storage_key_rotation_90_days` | `kms_key_rotation_enabled` |
| **MFA for UI access** | `iam_root_mfa_enabled`, `iam_root_hardware_mfa_enabled`, `iam_user_mfa_enabled_console_access`, `iam_user_hardware_mfa_enabled`, `iam_administrator_access_with_mfa`, `cognito_user_pool_mfa_enabled` | `entra_privileged_user_has_mfa`, `entra_non_privileged_user_has_mfa`, `entra_user_with_vm_access_has_mfa`, `entra_security_defaults_enabled` | `compute_project_os_login_2fa_enabled` |
| **API access / credentials** | `iam_no_root_access_key`, `iam_user_no_setup_initial_access_key`, `apigateway_restapi_authorizers_enabled`, `apigateway_restapi_public_with_authorizer`, `apigatewayv2_api_authorizers_enabled` | `entra_conditional_access_policy_require_mfa_for_management_api`, `app_function_access_keys_configured`, `app_function_identity_is_configured` | `apikeys_api_restrictions_configured`, `apikeys_key_exists`, `apikeys_key_rotated_in_90_days` |
| **Log all admin/config changes** | `cloudtrail_multi_region_enabled`, `cloudtrail_multi_region_enabled_logging_management_events`, `cloudtrail_cloudwatch_logging_enabled`, `cloudtrail_log_file_validation_enabled`, `cloudwatch_log_metric_filter_*`, `cloudwatch_changes_to_*_alarm_configured`, `config_recorder_all_regions_enabled` | `monitor_diagnostic_settings_exists`, `monitor_diagnostic_setting_with_appropriate_categories`, `monitor_alert_*` | `iam_audit_logs_enabled`, `logging_log_metric_filter_and_alert_for_*`, `logging_sink_created` |
| **Log integrity (digital signatures)** | `cloudtrail_log_file_validation_enabled` (exact) | None | None |
| **Public access denied** | `s3_bucket_public_access`, `s3_bucket_public_list_acl`, `s3_bucket_public_write_acl`, `s3_account_level_public_access_blocks`, `apigateway_restapi_public`, `awslambda_function_url_public`, `awslambda_function_not_publicly_accessible`, `rds_instance_no_public_access`, `rds_snapshots_public_access`, `ec2_securitygroup_allow_ingress_from_internet_to_all_ports`, `sns_topics_not_publicly_accessible`, `sqs_queues_not_publicly_accessible` | `storage_blob_public_access_level_is_disabled`, `storage_ensure_private_endpoints_in_storage_accounts`, `containerregistry_not_publicly_accessible`, `keyvault_private_endpoints`, `app_function_not_publicly_accessible`, `aks_clusters_public_access_disabled`, `network_http_internet_access_restricted` | `cloudstorage_bucket_public_access`, `compute_instance_public_ip`, `cloudsql_instance_public_ip`, `compute_firewall_*_access_from_the_internet_allowed` |
| **IAM least privilege** | `iam_*_no_administrative_privileges`, `iam_policy_allows_privilege_escalation`, `iam_inline_policy_allows_privilege_escalation`, `iam_role_administratoraccess_policy`, `iam_group_administrator_access_policy`, `iam_user_administrator_access_policy`, `iam_policy_attached_only_to_group_or_roles`, `iam_role_cross_service_confused_deputy_prevention` | `iam_role_user_access_admin_restricted`, `iam_subscription_roles_owner_custom_not_created`, `iam_custom_role_has_permissions_to_administer_resource_locks` | `iam_sa_no_administrative_privileges`, `iam_no_service_roles_at_project_level`, `iam_role_kms_enforce_separation_of_duties`, `iam_role_sa_enforce_separation_of_duties` |
| **Password policy** | `iam_password_policy_minimum_length_14`, `iam_password_policy_uppercase`, `iam_password_policy_lowercase`, `iam_password_policy_symbol`, `iam_password_policy_number`, `iam_password_policy_expires_passwords_within_90_days_or_less`, `iam_password_policy_reuse_24` | None | None |
| **Credential rotation / unused** | `iam_rotate_access_key_90_days`, `iam_user_accesskey_unused`, `iam_user_console_access_unused` | None | `iam_sa_user_managed_key_rotate_90_days`, `iam_sa_user_managed_key_unused`, `iam_service_account_unused` |
| **VPC / flow logs** | `vpc_flow_logs_enabled` | `network_flow_log_captured_sent`, `network_watcher_enabled`, `network_flow_log_more_than_90_days` | `compute_subnet_flow_logs_enabled` |
| **Backup / DR / Multi-AZ** | `backup_vaults_exist`, `backup_plans_exist`, `backup_reportplans_exist`, `rds_instance_backup_enabled`, `rds_*_protected_by_backup_plan`, `rds_cluster_multi_az`, `neptune_cluster_backup_enabled`, `documentdb_cluster_backup_enabled`, `efs_have_backup_enabled`, `s3_bucket_cross_region_replication`, `dynamodb_table_protected_by_backup_plan` | `vm_backup_enabled`, `vm_sufficient_daily_backup_retention_period`, `storage_geo_redundant_enabled` | `cloudsql_instance_automated_backups`, `cloudstorage_bucket_log_retention_policy_lock`, `cloudstorage_bucket_sufficient_retention_period` |
| **Access analysis / discovery** | `accessanalyzer_enabled`, `accessanalyzer_enabled_without_findings` | None specific | `iam_account_access_approval_enabled`, `iam_cloud_asset_inventory_enabled` |
| **Object lock / retention** | `s3_bucket_object_lock`, `s3_bucket_object_versioning`, `s3_bucket_lifecycle_enabled`, `cloudtrail_bucket_requires_mfa_delete`, `s3_bucket_no_mfa_delete` | `storage_ensure_soft_delete_is_enabled`, `storage_blob_versioning_is_enabled`, `storage_ensure_file_shares_soft_delete_is_enabled` | `cloudstorage_bucket_log_retention_policy_lock`, `cloudstorage_bucket_soft_delete_enabled`, `cloudstorage_bucket_versioning_enabled`, `cloudstorage_bucket_sufficient_retention_period` |
| **Uniform bucket-level access** | `s3_bucket_acl_prohibited` | `storage_account_key_access_disabled`, `storage_default_to_entra_authorization_enabled` | `cloudstorage_bucket_uniform_bucket_level_access` |
| **Container vulnerability scanning** | `ecr_registry_scan_images_on_push_enabled`, `ecr_repositories_scan_vulnerabilities_in_latest_image` | `defender_container_images_scan_enabled`, `defender_container_images_resolved_vulnerabilities` | `artifacts_container_analysis_enabled`, `gcr_container_scanning_enabled` |
| **WAF / rate limiting** | `wafv2_webacl_with_rules`, `waf_*_webacl_with_rules`, `wafv2_webacl_logging_enabled`, `waf_global_webacl_logging_enabled` | None | None |
| **Deployment region restriction** | `organizations_scp_check_deny_regions` | None | None |
| **Secrets automatic rotation** | `secretsmanager_automatic_rotation_enabled`, `secretsmanager_secret_rotated_periodically` | `keyvault_rbac_secret_expiration_set`, `keyvault_non_rbac_secret_expiration_set` | None |
| **Certificate management** | `acm_certificates_expiration_check`, `acm_certificates_with_secure_key_algorithms`, `acm_certificates_transparency_logs_enabled` | `keyvault_key_expiration_set_in_non_rbac`, `keyvault_rbac_key_expiration_set`, `keyvault_non_rbac_secret_expiration_set` | None |
| **GenAI guardrails / input/output filtering** | `bedrock_guardrail_prompt_attack_filter_enabled`, `bedrock_guardrail_sensitive_information_filter_enabled`, `bedrock_agent_guardrail_enabled`, `bedrock_model_invocation_logging_enabled`, `bedrock_api_key_no_administrative_privileges`, `bedrock_api_key_no_long_term_credentials` | None | None |
| **ML dev environment security** | `sagemaker_notebook_instance_root_access_disabled`, `sagemaker_notebook_instance_without_direct_internet_access_configured`, `sagemaker_notebook_instance_vpc_settings_configured`, `sagemaker_models_vpc_settings_configured`, `sagemaker_training_jobs_vpc_settings_configured`, `sagemaker_training_jobs_network_isolation_enabled`, `sagemaker_training_jobs_volume_and_output_encryption_enabled` | None | None |
| **Threat detection / anomalous behavior** | `cloudtrail_threat_detection_enumeration`, `cloudtrail_threat_detection_privilege_escalation`, `cloudtrail_threat_detection_llm_jacking`, `guardduty_is_enabled`, `guardduty_no_high_severity_findings` | None | None |
| **Serverless private access** | `awslambda_function_inside_vpc`, `awslambda_function_not_publicly_accessible`, `awslambda_function_url_public` | `app_function_not_publicly_accessible` | None |
### What Prowler Does NOT Cover (accept MANUAL honestly)
Don't pad mappings for these — mark `Checks: []` and move on:
- **TLS 1.3 version specifically** — Prowler verifies TLS is enforced, not always the exact version
- **IANA port-protocol consistency** — no check for "protocol running on its assigned port"
- **mTLS on most Azure/GCP services** — limited to App Service client certs on Azure, nothing on GCP
- **Rate limiting** on monitoring endpoints, load balancers, serverless invocations, vector ingestion
- **Session cookie expiry** (LB stickiness)
- **HTTP header scrubbing** (Server, X-Powered-By)
- **Certificate transparency verification for imports**
- **Model version pinning, red teaming, AI quality review**
- **Vector embedding validation, dimensional constraints, ANN vs exact search**
- **Secret region replication** (cross-region residency)
- **Lifecycle cleanup policies on container registries**
- **Row-level / column-level security in data warehouses**
- **Deployment region restriction on Azure/GCP** (AWS has `organizations_scp_check_deny_regions`, others don't)
- **Cross-tenant alert silencing permissions**
- **Field-level masking in logs**
- **Managed view enforcement for database access**
- **Automatic MFA delete on all S3 buckets** (only CloudTrail bucket variant exists for some frameworks — AWS has the generic `s3_bucket_no_mfa_delete` though)
---
## Workflow C: Add a New Output Formatter
Use when a new framework needs its own CSV columns or terminal table. Follow the c5/csa/ens layout exactly:
```bash
mkdir -p prowler/lib/outputs/compliance/{framework}
touch prowler/lib/outputs/compliance/{framework}/__init__.py
```
### Step 1 — Create `{framework}.py` (table dispatcher ONLY)
Copy from `prowler/lib/outputs/compliance/c5/c5.py` and change the function name + framework string. The `diff` between your file and `c5.py` should be just those two lines. **No function docstring** — other frameworks don't have one, stay consistent.
### Step 2 — Create `models.py`
One Pydantic v2 `BaseModel` per provider. Field names become CSV column headers (public API — don't rename later without a migration).
```python
from typing import Optional
from pydantic import BaseModel
class {Framework}_AWSModel(BaseModel):
Provider: str
Description: str
AccountId: str
Region: str
AssessmentDate: str
Requirements_Id: str
Requirements_Description: str
# ... provider-specific columns
Status: str
StatusExtended: str
ResourceId: str
ResourceName: str
CheckId: str
Muted: bool
```
### Step 3 — Create `{framework}_{provider}.py` for each provider
Copy from `prowler/lib/outputs/compliance/c5/c5_aws.py` etc. Contains the `{Framework}_AWS(ComplianceOutput)` class with `transform()` that walks findings and emits model rows. This file IS allowed to import `Finding`.
### Step 4 — Register everywhere
**`prowler/lib/outputs/compliance/compliance.py`** (CLI table dispatcher):
```python
from prowler.lib.outputs.compliance.{framework}.{framework} import get_{framework}_table
def display_compliance_table(...):
...
elif compliance_framework.startswith("{framework}_"):
get_{framework}_table(findings, bulk_checks_metadata,
compliance_framework, output_filename,
output_directory, compliance_overview)
```
**`prowler/__main__.py`** (CLI output writer per provider):
Add imports at the top:
```python
from prowler.lib.outputs.compliance.{framework}.{framework}_aws import {Framework}_AWS
from prowler.lib.outputs.compliance.{framework}.{framework}_azure import {Framework}_Azure
from prowler.lib.outputs.compliance.{framework}.{framework}_gcp import {Framework}_GCP
```
Add provider-specific `elif compliance_name.startswith("{framework}_"):` branches that instantiate the class and call `batch_write_data_to_file()`.
**`api/src/backend/tasks/jobs/export.py`** (API export dispatcher):
```python
from prowler.lib.outputs.compliance.{framework}.{framework}_aws import {Framework}_AWS
# ... azure, gcp
COMPLIANCE_CLASS_MAP = {
"aws": [
# ...
(lambda name: name.startswith("{framework}_"), {Framework}_AWS),
],
# ... azure, gcp
}
```
**Always use `startswith`**, never `name == "framework_aws"`. Exact match is a regression.
### Step 5 — Add tests
Create `tests/lib/outputs/compliance/{framework}/` with `{framework}_aws_test.py`, `{framework}_azure_test.py`, `{framework}_gcp_test.py`. See the test template in [references/test_template.md](references/test_template.md).
Add fixtures to `tests/lib/outputs/compliance/fixtures.py`: one `Compliance` object per provider with 1 evaluated + 1 manual requirement to exercise both code paths in `transform()`.
### Circular import warning
**The table dispatcher file (`{framework}.py`) MUST NOT import `Finding`** (directly or transitively). The cycle is:
```
compliance.compliance imports get_{framework}_table
→ {framework}.py imports ComplianceOutput
→ compliance_output imports Finding
→ finding imports get_check_compliance from compliance.compliance
→ CIRCULAR
```
Keep `{framework}.py` bare — only `colorama`, `tabulate`, `prowler.config.config`. Put anything that imports `Finding` in the per-provider `{framework}_{provider}.py` files.
---
## Conventions and Hard-Won Gotchas
These are lessons from the FINOS CCC v2025.10 sync + 172-AR audit pass (April 2026). Learn them once; save days of debugging.
1. **Per-provider files are non-negotiable.** Never collapse `{framework}_aws.py`, `{framework}_azure.py`, `{framework}_gcp.py` into a single parameterized class, no matter how DRY-tempting. Every other framework in the codebase follows the per-provider pattern and reviewers will reject the refactor. The CSV column names differ per provider — three classes is the convention.
2. **`{framework}.py` has NO function docstring.** Other frameworks don't have them. Don't add one to be "helpful".
3. **Circular import protection**: the table dispatcher file MUST NOT import `Finding` (directly or transitively). Split the code so `{framework}.py` only has `get_{framework}_table()` with bare imports, and `{framework}_{provider}.py` holds the class that needs `Finding`.
4. **`Generic_Compliance_Requirement_Attribute` is the fallback** — in the `Compliance_Requirement.Attributes` Union in `compliance_models.py`, Generic MUST be LAST because Pydantic v1 tries union members in order. Putting Generic first means every framework-specific attribute falls through to Generic and the specific model is never used.
5. **Pydantic v1 imports.** `from pydantic.v1 import BaseModel` in `compliance_models.py` — not v2. Mixing causes validation errors. Pydantic v2 is used in the CSV models (`models.py`) — that's fine because they're separate trees.
6. **`get_check_compliance()` key format** is `f"{Framework}-{Version}"` ONLY if Version is set. Empty Version → key is `"{Framework}"` (no version suffix). Tests that mock compliance dicts must match this exact format — when a framework ships with `Version: ""`, downstream code and tests break silently.
7. **CSV column names from `models.py` are public API.** Don't rename a field without migrating downstream consumers — CSV headers change.
8. **Upstream YAML multi-line scalars** (`|` block scalars) preserve newlines. Collapse to single-line with `" ".join(value.split())` before writing to JSON.
9. **Upstream catalogs can use multiple shapes.** FINOS CCC uses `control-families: [...]` in most catalogs but `controls: [...]` at the top level in `storage/object`. Any sync script must handle both or silently drop entire catalogs.
10. **Foreign-prefix AR ids.** Upstream sometimes "imports" requirements from one catalog into another by keeping the original id prefix (e.g., `CCC.AuditLog.CN08.AR01` appearing under `CCC.Logging.CN03`). Prowler's compliance model requires unique ids within a catalog — rewrite the foreign id to fit the parent control: `CCC.AuditLog.CN08.AR01` (inside `CCC.Logging.CN03`) → `CCC.Logging.CN03.AR01`.
11. **Genuine upstream id collisions.** Sometimes upstream has a real typo where two different requirements share the same id (e.g., `CCC.Core.CN14.AR02` defined twice for 30-day and 14-day backup variants). Renumber the second copy to the next free AR number. Preserve check mappings by matching on `(Section, frozenset(Applicability))` since the renumbered id won't match by id.
12. **`COMPLIANCE_CLASS_MAP` in `export.py` uses `startswith` predicates** for all modern frameworks. Exact match (`name == "ccc_aws"`) is an anti-pattern — it was present for CCC until April 2026 and was the reason CCC couldn't have versioned variants.
13. **Pre-validate every check id** against the per-provider inventory before writing the JSON. A typo silently becomes a dead reference that no finding will ever map to. The audit script MUST abort with stderr listing typos, not swallow them.
14. **REPLACE is better than PATCH** for audit decisions. Encoding every mapping explicitly makes the audit reproducible and surfaces hidden assumptions from the legacy data. A PATCH system that adds/removes is too easy to forget.
15. **When no check applies, MANUAL is correct.** Do not pad mappings with tangential checks "just in case". Prowler's compliance reports are meant to be actionable — padding them with noise breaks that. Honest manual reqs can be mapped later when new checks land.
16. **UI groups by `Attributes[0].FamilyName` and `Attributes[0].Section`.** If FamilyName has inconsistent variants within the same JSON (e.g., "Logging & Monitoring" vs "Logging and Monitoring"), the UI renders them as separate categories. Section empty → the requirement falls into an orphan control with label "". Normalize before shipping.
17. **Provider coverage is asymmetric.** AWS has dense coverage (~586 checks across 80+ services): in-transit encryption, IAM, database encryption, backup. Azure (~167 checks) and GCP (~102 checks) are thinner especially for in-transit encryption, mTLS, and ML/AI. Accept the asymmetry in mappings — don't force GCP parity where Prowler genuinely can't verify.
---
## Useful One-Liners
```bash
# Count requirements per service prefix (CCC, CIS sections, etc.)
jq -r '.Requirements[].Id | split(".")[1]' prowler/compliance/aws/ccc_aws.json | sort | uniq -c
# Find duplicate requirement IDs
jq -r '.Requirements[].Id' file.json | sort | uniq -d
# Count manual requirements (no checks)
jq '[.Requirements[] | select((.Checks | length) == 0)] | length' file.json
# List all unique check references in a framework
jq -r '.Requirements[].Checks[]' file.json | sort -u
# List all unique Sections (to spot inconsistency)
jq '[.Requirements[].Attributes[0].Section] | unique' file.json
# List all unique FamilyNames (to spot inconsistency)
jq '[.Requirements[].Attributes[0].FamilyName] | unique' file.json
# Diff requirement ids between two versions of the same framework
diff <(jq -r '.Requirements[].Id' a.json | sort) <(jq -r '.Requirements[].Id' b.json | sort)
# Find where a check id is used across all frameworks
grep -rl "my_check_name" prowler/compliance/
# Check if a Prowler check exists
find prowler/providers/aws/services -name "{check_id}.metadata.json"
# Validate a JSON with Pydantic
python -c "from prowler.lib.check.compliance_models import Compliance; print(Compliance.parse_file('prowler/compliance/aws/ccc_aws.json').Framework)"
```
---
## Best Practices
1. **Requirement IDs**: Follow the original framework numbering exactly (e.g., "1.1", "A.5.1", "T1190", "ac_2_1")
2. **Check Mapping**: Map to existing checks when possible. Use `Checks: []` for manual-only requirements — honest MANUAL beats padded coverage
2. **Check Mapping**: Map to existing checks when possible. Use `Checks: []` for manual-only requirements
3. **Completeness**: Include all framework requirements, even those without automated checks
4. **Version Control**: Include framework version in `Name` and `Version` fields. **Never leave `Version: ""`** — it breaks `get_check_compliance()` key format
4. **Version Control**: Include framework version in `Name` and `Version` fields
5. **File Naming**: Use format `{framework}_{version}_{provider}.json`
6. **Validation**: Prowler validates JSON against Pydantic models at startup — invalid JSON will cause errors
7. **Pre-validate check ids** against the provider's `*.metadata.json` inventory before every commit
8. **Normalize FamilyName and Section** to avoid inconsistent UI tree branches
9. **Register everywhere**: SDK model (if needed) → `compliance.py` dispatcher → `__main__.py` CLI writer → `export.py` API map → UI mapper. Skipping any layer results in silent failures
10. **Audit, don't pad**: when reviewing mappings, apply the golden rule — the check's title/risk MUST literally describe what the requirement text says. Tangential relation doesn't count
6. **Validation**: Prowler validates JSON against Pydantic models at startup - invalid JSON will cause errors
## Commands
@@ -1005,46 +482,11 @@ prowler aws --compliance cis_5.0_aws -M csv json html
## Code References
### Layer 1 — SDK / Core
- **Compliance Models:** `prowler/lib/check/compliance_models.py` (Pydantic v1 model tree)
- **Compliance Processing / Linker:** `prowler/lib/check/compliance.py` (`get_check_compliance`, `update_checks_metadata_with_compliance`)
- **Check Utils:** `prowler/lib/check/utils.py` (`list_compliance_modules`)
### Layer 2 — JSON Catalogs
- **Framework JSONs:** `prowler/compliance/{provider}/` (auto-discovered via directory walk)
### Layer 3 — Output Formatters
- **Per-framework folders:** `prowler/lib/outputs/compliance/{framework}/`
- **Shared base class:** `prowler/lib/outputs/compliance/compliance_output.py` (`ComplianceOutput` + `batch_write_data_to_file`)
- **CLI table dispatcher:** `prowler/lib/outputs/compliance/compliance.py` (`display_compliance_table`)
- **Finding model:** `prowler/lib/outputs/finding.py` (**do not import transitively from table dispatcher files — circular import**)
- **CLI writer:** `prowler/__main__.py` (per-provider `elif compliance_name.startswith(...)` branches that instantiate per-provider classes)
### Layer 4 — API / UI
- **API lazy loader:** `api/src/backend/api/compliance.py` (`LazyComplianceTemplate`, `LazyChecksMapping`)
- **API export dispatcher:** `api/src/backend/tasks/jobs/export.py` (`COMPLIANCE_CLASS_MAP` with `startswith` predicates)
- **UI framework router:** `ui/lib/compliance/compliance-mapper.ts`
- **UI per-framework mapper:** `ui/lib/compliance/{framework}.tsx`
- **UI detail panel:** `ui/components/compliance/compliance-custom-details/{framework}-details.tsx`
- **UI types:** `ui/types/compliance.ts`
- **UI icon:** `ui/components/icons/compliance/{framework}.svg` + registration in `IconCompliance.tsx`
### Tests
- **Output formatter tests:** `tests/lib/outputs/compliance/{framework}/{framework}_{provider}_test.py`
- **Shared fixtures:** `tests/lib/outputs/compliance/fixtures.py`
- **Compliance Models:** `prowler/lib/check/compliance_models.py`
- **Compliance Processing:** `prowler/lib/check/compliance.py`
- **Compliance Output:** `prowler/lib/outputs/compliance/`
## Resources
- **JSON Templates:** See [assets/](assets/) for framework JSON templates (cis, ens, iso27001, mitre_attack, prowler_threatscore, generic)
- **Config-driven compliance sync** (any upstream-backed framework):
- [assets/sync_framework.py](assets/sync_framework.py) — generic runner. Loads a YAML config, dynamically imports the declared parser, applies generic post-processing (id uniqueness safety net, `FamilyName` normalization, legacy check-mapping preservation with config-driven fallback keys), and writes the provider JSONs with Pydantic post-validation. Framework-agnostic — works for any compliance framework.
- [assets/configs/ccc.yaml](assets/configs/ccc.yaml) — canonical config example (FINOS CCC v2025.10). Copy and adapt for new frameworks.
- [assets/parsers/finos_ccc.py](assets/parsers/finos_ccc.py) — FINOS CCC YAML parser. Handles both upstream shapes (`control-families` and top-level `controls`), foreign-prefix AR rewriting, and genuine collision renumbering. Exposes `parse_upstream(config) -> list[dict]`.
- [assets/parsers/](assets/parsers/) — add new parser modules here for unfamiliar upstream formats (NIST OSCAL JSON, MITRE STIX, CIS Benchmarks, etc.). Each parser is a `{name}.py` file implementing `parse_upstream(config) -> list[dict]` with guaranteed-unique ids.
- **Reusable audit tooling** (added April 2026 after the FINOS CCC v2025.10 sync):
- [assets/audit_framework_template.py](assets/audit_framework_template.py) — explicit REPLACE decision ledger with pre-validation against the per-provider inventory. Drop-in template for auditing any framework.
- [assets/query_checks.py](assets/query_checks.py) — keyword/service/id query helper over `/tmp/checks_{provider}.json`.
- [assets/dump_section.py](assets/dump_section.py) — dumps every AR for a given id prefix across all 3 providers with current check mappings.
- [assets/build_inventory.py](assets/build_inventory.py) — generates `/tmp/checks_{provider}.json` from `*.metadata.json` files.
- **Templates:** See [assets/](assets/) for framework JSON templates
- **Documentation:** See [references/compliance-docs.md](references/compliance-docs.md) for additional resources
- **Related skill:** [prowler-compliance-review](../prowler-compliance-review/SKILL.md) — PR review checklist and validator script for compliance framework PRs
@@ -1,207 +0,0 @@
#!/usr/bin/env python3
"""
Cloud-auditor pass template for any Prowler compliance framework.
Encode explicit REPLACE decisions per (requirement_id, provider) pair below.
Each decision FULLY overwrites the legacy Checks list for that requirement.
Workflow:
1. Run build_inventory.py first to cache per-provider check metadata.
2. Run dump_section.py to see current mappings for the catalog you're auditing.
3. Fill in DECISIONS below with explicit check lists.
4. Run this script — it pre-validates every check id against the inventory
and aborts with stderr listing typos before writing.
Decision rules (apply as a hostile cloud auditor):
- The Prowler check's title/risk MUST literally describe what the AR text says.
"Related" is not enough.
- If no check actually addresses the requirement, leave `[]` (= MANUAL).
HONEST MANUAL is worth more than padded coverage.
- Missing provider key = leave the legacy mapping untouched.
- Empty list `[]` = explicitly MANUAL (overwrites legacy).
Usage:
# 1. Copy this file to /tmp/audit_<framework>.py and fill in DECISIONS
# 2. Edit FRAMEWORK_KEY below to match your framework file naming
# 3. Run:
python /tmp/audit_<framework>.py
"""
from __future__ import annotations
import json
import sys
from pathlib import Path
# ---------------------------------------------------------------------------
# Configure for your framework
# ---------------------------------------------------------------------------
# Framework file basename inside prowler/compliance/{provider}/.
# If your framework is called "cis_5.0_aws.json", FRAMEWORK_KEY is "cis_5.0".
# If the file is "ccc_aws.json", FRAMEWORK_KEY is "ccc".
FRAMEWORK_KEY = "ccc"
# Which providers to apply decisions to.
PROVIDERS = ["aws", "azure", "gcp"]
PROWLER_DIR = Path("prowler/compliance")
CHECK_INV = {prov: Path(f"/tmp/checks_{prov}.json") for prov in PROVIDERS}
# ---------------------------------------------------------------------------
# DECISIONS — encode one entry per requirement you want to audit
# ---------------------------------------------------------------------------
# DECISIONS[requirement_id][provider] = list[str] of check ids
# See SKILL.md → "Audit Reference Table: Requirement Text → Prowler Checks"
# for a comprehensive mapping cheat sheet built from a 172-AR CCC audit.
DECISIONS: dict[str, dict[str, list[str]]] = {}
# ---- Example entries (delete and replace with your own) ----
# Example 1: TLS in transit enforced (non-SSH traffic)
# DECISIONS["CCC.Core.CN01.AR01"] = {
# "aws": [
# "cloudfront_distributions_https_enabled",
# "cloudfront_distributions_origin_traffic_encrypted",
# "s3_bucket_secure_transport_policy",
# "elbv2_ssl_listeners",
# "rds_instance_transport_encrypted",
# "kafka_cluster_in_transit_encryption_enabled",
# "redshift_cluster_in_transit_encryption_enabled",
# "opensearch_service_domains_https_communications_enforced",
# ],
# "azure": [
# "storage_secure_transfer_required_is_enabled",
# "app_minimum_tls_version_12",
# "postgresql_flexible_server_enforce_ssl_enabled",
# "sqlserver_recommended_minimal_tls_version",
# ],
# "gcp": [
# "cloudsql_instance_ssl_connections",
# ],
# }
# Example 2: MANUAL — no Prowler check exists
# DECISIONS["CCC.Core.CN01.AR07"] = {
# "aws": [], # no IANA port/protocol check exists in Prowler
# "azure": [],
# "gcp": [],
# }
# Example 3: Reuse a decision for multiple sibling ARs
# DECISIONS["CCC.ObjStor.CN05.AR02"] = DECISIONS["CCC.ObjStor.CN05.AR01"]
# ---------------------------------------------------------------------------
# Driver — do not edit below
# ---------------------------------------------------------------------------
def load_inventory(provider: str) -> dict:
path = CHECK_INV[provider]
if not path.exists():
raise SystemExit(
f"Check inventory missing: {path}\n"
f"Run: python skills/prowler-compliance/assets/build_inventory.py {provider}"
)
with open(path) as f:
return json.load(f)
def resolve_json_path(provider: str) -> Path:
"""Resolve the JSON file path for a given provider.
Handles both shapes: {FRAMEWORK_KEY}_{provider}.json (ccc_aws.json) and
cases where FRAMEWORK_KEY already contains the provider suffix.
"""
candidates = [
PROWLER_DIR / provider / f"{FRAMEWORK_KEY}_{provider}.json",
PROWLER_DIR / provider / f"{FRAMEWORK_KEY}.json",
]
for c in candidates:
if c.exists():
return c
raise SystemExit(
f"Could not find framework JSON for provider={provider} "
f"with FRAMEWORK_KEY={FRAMEWORK_KEY}. Tried: {candidates}"
)
def plan_for_provider(
provider: str,
) -> tuple[Path, dict, tuple[int, int, int], list[tuple[str, str]]]:
"""Build the updated JSON for one provider without writing it.
Returns (path, mutated_data, (touched, added, removed), unknowns).
Writing is deferred to a second pass so that a typo in any provider
aborts the whole run before any file on disk changes.
"""
path = resolve_json_path(provider)
with open(path) as f:
data = json.load(f)
inv = load_inventory(provider)
touched = 0
add_count = 0
rm_count = 0
unknown: list[tuple[str, str]] = []
for req in data["Requirements"]:
rid = req["Id"]
if rid not in DECISIONS or provider not in DECISIONS[rid]:
continue
new_checks = list(dict.fromkeys(DECISIONS[rid][provider]))
for c in new_checks:
if c not in inv:
unknown.append((rid, c))
before = set(req.get("Checks") or [])
after = set(new_checks)
rm_count += len(before - after)
add_count += len(after - before)
req["Checks"] = new_checks
touched += 1
return path, data, (touched, add_count, rm_count), unknown
def main() -> int:
if not DECISIONS:
print("No DECISIONS encoded. Fill in the DECISIONS dict and re-run.")
return 1
print(f"Applying {len(DECISIONS)} decisions to framework '{FRAMEWORK_KEY}'...")
# Pass 1: validate every provider before touching disk. A typo in any
# provider must abort the run before ANY file has been rewritten.
plans: list[tuple[str, Path, dict, tuple[int, int, int]]] = []
all_unknown: list[tuple[str, str, str]] = []
for provider in PROVIDERS:
path, data, counts, unknown = plan_for_provider(provider)
for rid, c in unknown:
all_unknown.append((provider, rid, c))
plans.append((provider, path, data, counts))
if all_unknown:
print("\n!! UNKNOWN CHECK IDS (typos?):", file=sys.stderr)
for provider, rid, c in all_unknown:
print(f" {provider} {rid} -> {c}", file=sys.stderr)
print(
"\nAborting: fix the check ids above and re-run. "
"No files were modified.",
file=sys.stderr,
)
return 2
# Pass 2: all providers validated cleanly — write.
for provider, path, data, (touched, added, removed) in plans:
with open(path, "w") as f:
json.dump(data, f, indent=2, ensure_ascii=False)
f.write("\n")
print(
f" {provider}: touched={touched} added={added} removed={removed}"
)
return 0
if __name__ == "__main__":
sys.exit(main())
@@ -1,100 +0,0 @@
#!/usr/bin/env python3
"""
Build a per-provider check inventory by scanning Prowler's check metadata files.
Outputs one JSON per provider at /tmp/checks_{provider}.json with the shape:
{
"check_id": {
"service": "...",
"subservice": "...",
"resource": "...",
"severity": "...",
"title": "...",
"description": "...",
"risk": "..."
},
...
}
This is the reference used by audit_framework_template.py for pre-validation
(every check id in the audit ledger must exist in the inventory) and by
query_checks.py for keyword/service lookup.
Usage:
python skills/prowler-compliance/assets/build_inventory.py
# Or for a specific provider:
python skills/prowler-compliance/assets/build_inventory.py aws
Output:
/tmp/checks_{provider}.json for every provider discovered under
prowler/providers/ with a services/ directory.
"""
from __future__ import annotations
import json
import sys
from pathlib import Path
PROVIDERS_ROOT = Path("prowler/providers")
def discover_providers() -> list[str]:
"""Return every provider that currently has a services/ directory.
Derived from the filesystem so new providers are picked up automatically
and stale hard-coded lists cannot drift from the repo.
"""
if not PROVIDERS_ROOT.exists():
return []
return sorted(
p.name
for p in PROVIDERS_ROOT.iterdir()
if p.is_dir() and (p / "services").is_dir()
)
def build_for_provider(provider: str) -> dict:
inventory: dict[str, dict] = {}
base = Path(f"prowler/providers/{provider}/services")
if not base.exists():
print(f" skip {provider}: no services directory", file=sys.stderr)
return inventory
for meta_path in base.rglob("*.metadata.json"):
try:
with open(meta_path) as f:
data = json.load(f)
except Exception as exc:
print(f" warn: cannot parse {meta_path}: {exc}", file=sys.stderr)
continue
cid = data.get("CheckID") or meta_path.stem.replace(".metadata", "")
inventory[cid] = {
"service": data.get("ServiceName", ""),
"subservice": data.get("SubServiceName", ""),
"resource": data.get("ResourceType", ""),
"severity": data.get("Severity", ""),
"title": data.get("CheckTitle", ""),
"description": data.get("Description", ""),
"risk": data.get("Risk", ""),
}
return inventory
def main() -> int:
providers = sys.argv[1:] or discover_providers()
if not providers:
print(
f"error: no providers found under {PROVIDERS_ROOT}/",
file=sys.stderr,
)
return 1
for provider in providers:
inv = build_for_provider(provider)
out_path = Path(f"/tmp/checks_{provider}.json")
with open(out_path, "w") as f:
json.dump(inv, f, indent=2)
print(f" {provider}: {len(inv)} checks → {out_path}")
return 0
if __name__ == "__main__":
sys.exit(main())
@@ -1,120 +0,0 @@
# FINOS Common Cloud Controls (CCC) sync config for sync_framework.py.
#
# Usage:
# python skills/prowler-compliance/assets/sync_framework.py \
# skills/prowler-compliance/assets/configs/ccc.yaml
#
# Prerequisite: run the upstream fetch step from SKILL.md Workflow A Step 1 to
# populate upstream.dir with the raw FINOS catalog YAML files.
framework:
name: CCC
display_name: Common Cloud Controls Catalog (CCC)
version: v2025.10
# The {provider_display} placeholder is replaced at output time with the
# per-provider display string from the providers list below.
description_template: "Common Cloud Controls Catalog (CCC) for {provider_display}"
providers:
- key: aws
display: AWS
- key: azure
display: Azure
- key: gcp
display: GCP
output:
# Supported placeholders: {provider}, {framework}, {version}.
# For versioned frameworks like CIS the template would be
# "prowler/compliance/{provider}/cis_{version}_{provider}.json".
path_template: "prowler/compliance/{provider}/ccc_{provider}.json"
upstream:
# Directory containing the cached FINOS catalog YAMLs. Populate via
# SKILL.md Workflow A Step 1 (gh api raw download commands).
dir: /tmp/ccc_upstream
fetch_docs: "See SKILL.md Workflow A Step 1 for gh api fetch commands"
parser:
# Name of the parser module under parsers/ (loaded dynamically by the
# runner). For FINOS CCC YAML this is always finos_ccc.
module: finos_ccc
# FINOS CCC catalog files in load order. Core first so its ARs render
# first in the output JSON.
catalog_files:
- core_ccc.yaml
- management_auditlog.yaml
- management_logging.yaml
- management_monitoring.yaml
- storage_object.yaml
- networking_loadbalancer.yaml
- networking_vpc.yaml
- crypto_key.yaml
- crypto_secrets.yaml
- database_warehouse.yaml
- database_vector.yaml
- database_relational.yaml
- devtools_build.yaml
- devtools_container-registry.yaml
- identity_iam.yaml
- ai-ml_gen-ai.yaml
- ai-ml_mlde.yaml
- app-integration_message.yaml
- compute_serverless-computing.yaml
# Shape-2 catalogs (storage/object) reference the family via id only
# (e.g. "CCC.ObjStor.Data") with no human-readable title or description
# in the YAML. Map the suffix (after the last dot) to a canonical title
# and description so the generated JSON has consistent FamilyName fields
# regardless of upstream shape.
family_id_title:
Data: Data
IAM: Identity and Access Management
Identity: Identity and Access Management
Encryption: Encryption
Logging: Logging and Monitoring
Network: Network Security
Availability: Availability
Integrity: Integrity
Confidentiality: Confidentiality
family_id_description:
Data: "The Data control family ensures the confidentiality, integrity, availability, and sovereignty of data across its lifecycle."
IAM: "The Identity and Access Management control family ensures that only trusted and authenticated entities can access resources."
post_processing:
# Collapse FamilyName variants that appear inconsistently across upstream
# catalogs. The Prowler UI groups by Attributes[0].FamilyName exactly,
# so each variant would otherwise become a separate tree branch.
family_name_normalization:
"Logging & Monitoring": "Logging and Monitoring"
"Logging and Metrics Publication": "Logging and Monitoring"
# Preserve existing Checks lists from the legacy Prowler JSON when
# regenerating. The runner builds two lookup tables from the legacy
# output: a primary index by Id, and fallback indexes composed of
# attribute field names.
#
# primary_key: the top-level requirement field to use as the primary
# lookup key (almost always "Id")
# fallback_keys: a list of composite keys. Each composite key is a list
# of Attributes[0] field names to join into a tuple. List-valued fields
# (like Applicability) are frozen to frozenset so the tuple is hashable.
#
# CCC uses (Section, Applicability) because Applicability is a CCC-only
# top-level attribute field. CIS would use (Section, Profile). NIST would
# use (ItemId,). The fallback is how renumbered or rewritten ids still
# recover their check mappings.
#
# legacy_path_template (optional): path to read legacy Checks FROM.
# Defaults to output.path_template, which is correct for unversioned
# frameworks (like CCC) where regeneration overwrites the same file.
# For versioned frameworks that write to a new file on each version
# bump (e.g. cis_5.1_aws.json while the legacy mappings live in
# cis_5.0_aws.json), set this to the previous-version path so Checks
# are preserved instead of lost:
# legacy_path_template: "prowler/compliance/{provider}/cis_5.0_{provider}.json"
check_preservation:
primary_key: Id
fallback_keys:
- [Section, Applicability]
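    # Illustrative only, not used by CCC: a framework whose legacy attributes
    # need more than one composite key would list several fallbacks, tried in
    # order, e.g.
    #   fallback_keys:
    #     - [Section, Profile]
    #     - [Section]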
@@ -1,92 +0,0 @@
#!/usr/bin/env python3
"""
Dump every requirement of a compliance framework for a given id prefix across
providers, with their current Check mappings.
Useful for reviewing a whole control family in one pass before encoding audit
decisions in audit_framework_template.py.
Usage:
# Dump all CCC.Core requirements across aws/azure/gcp
python skills/prowler-compliance/assets/dump_section.py ccc "CCC.Core."
# Dump all CIS 5.0 section 1 requirements for AWS only
python skills/prowler-compliance/assets/dump_section.py cis_5.0_aws "1."
Arguments:
framework_key: file prefix inside prowler/compliance/{provider}/ without
the provider suffix. Examples:
- "ccc" loads ccc_aws.json / ccc_azure.json / ccc_gcp.json
- "cis_5.0_aws" loads only that one file
- "iso27001_2022" loads all providers
id_prefix: Requirement id prefix to filter by (e.g. "CCC.Core.",
"1.1.", "A.5.").
"""
from __future__ import annotations
import json
import sys
from collections import defaultdict
from pathlib import Path
PROWLER_COMPLIANCE_DIR = Path("prowler/compliance")
def main() -> int:
if len(sys.argv) < 3:
print(__doc__)
return 1
framework_key = sys.argv[1]
id_prefix = sys.argv[2]
# Find matching JSON files across all providers
candidates: list[tuple[str, Path]] = []
for prov_dir in sorted(PROWLER_COMPLIANCE_DIR.iterdir()):
if not prov_dir.is_dir():
continue
for json_path in prov_dir.glob("*.json"):
stem = json_path.stem
if stem == framework_key or stem.startswith(f"{framework_key}_") \
or stem == f"{framework_key}_{prov_dir.name}":
candidates.append((prov_dir.name, json_path))
if not candidates:
print(f"No files matching '{framework_key}'", file=sys.stderr)
return 2
discovered_providers = sorted({prov for prov, _ in candidates})
by_id: dict[str, dict] = defaultdict(dict)
for prov, path in candidates:
with open(path) as f:
data = json.load(f)
for req in data["Requirements"]:
if req["Id"].startswith(id_prefix):
by_id[req["Id"]][prov] = {
"desc": req.get("Description", ""),
"sec": (req.get("Attributes") or [{}])[0].get("Section", ""),
"obj": (req.get("Attributes") or [{}])[0].get(
"SubSectionObjective", ""
),
"checks": req.get("Checks") or [],
}
for ar_id in sorted(by_id):
rows = by_id[ar_id]
sample = next(iter(rows.values()))
print(f"\n### {ar_id}")
print(f" desc: {sample['desc']}")
if sample["sec"]:
print(f" sec : {sample['sec']}")
if sample["obj"]:
print(f" obj : {sample['obj']}")
for prov in discovered_providers:
if prov in rows:
checks = rows[prov]["checks"]
print(f" {prov}: ({len(checks)}) {checks}")
return 0
if __name__ == "__main__":
sys.exit(main())
@@ -1,223 +0,0 @@
"""
FINOS Common Cloud Controls (CCC) YAML parser.
Reads cached upstream YAML files and emits Prowler-format requirements
(``{Id, Description, Attributes: [...], Checks: []}``). This module is
agnostic to providers, JSON output paths, framework metadata and legacy
check-mapping preservation; those are handled by ``sync_framework.py``.
Contract
--------
``parse_upstream(config: dict) -> list[dict]``
Returns a list of Prowler-format requirement dicts with **guaranteed
unique ids**. Foreign-prefix AR rewriting and genuine collision
renumbering both happen inside this module the runner treats id
uniqueness as a contract violation, not as something to fix.
Config keys consumed
--------------------
This parser reads the following config entries (the rest of the config is
opaque to it):
- ``upstream.dir``: directory containing the cached YAMLs
- ``parser.catalog_files``: ordered list of YAML filenames to load
- ``parser.family_id_title``: maps a family-id suffix to its canonical family title (shape 2)
- ``parser.family_id_description``: maps a family-id suffix to its family description (shape 2)
Upstream shapes
---------------
FINOS CCC catalogs come in two shapes:
1. ``control-families: [{title, description, controls: [...]}]``
(used by most catalogs)
2. ``controls: [{id, family: "CCC.X.Y", ...}]`` (no families wrapper; used
by ``storage/object``). The ``family`` field references a family id with
no human-readable title in the file the title/description come from
``config.parser.family_id_title`` / ``family_id_description``.
Id rewriting rules
------------------
- **Foreign-prefix rewriting**: upstream intentionally aliases requirements
across catalogs by keeping the original prefix (e.g. ``CCC.AuditLog.CN08.AR01``
appears nested under ``CCC.Logging.CN03``). Prowler requires unique ids
within a catalog file, so we rename the AR to fit its parent control:
``CCC.Logging.CN03.AR01``. See ``rewrite_ar_id()``.
- **Genuine collision renumbering**: sometimes upstream has a real typo
where two distinct requirements share the same id (e.g.
``CCC.Core.CN14.AR02`` appears twice for 30-day and 14-day backup variants).
The second copy is renumbered to the next free AR number within the
control. See the ``seen_ids`` logic in ``emit_requirement()``.
"""
from __future__ import annotations
from pathlib import Path
import yaml
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def clean(value: str | None) -> str:
"""Trim and collapse internal whitespace/newlines into single spaces.
Upstream YAML uses ``|`` block scalars that preserve newlines; Prowler
stores descriptions as single-line text.
"""
if not value:
return ""
return " ".join(value.split())
def flatten_mappings(mappings):
"""Convert upstream ``{reference-id, entries: [{reference-id, ...}]}`` to
Prowler's ``{ReferenceId, Identifiers: [...]}``.
"""
if not mappings:
return []
out = []
for m in mappings:
ids = []
for entry in m.get("entries") or []:
eid = entry.get("reference-id")
if eid:
ids.append(eid)
out.append({"ReferenceId": m.get("reference-id", ""), "Identifiers": ids})
return out
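# Illustrative call with hypothetical reference ids:
#   flatten_mappings([{"reference-id": "CCC.Core.TH01",
#                      "entries": [{"reference-id": "CCC.ObjStor.TH01"}]}])
#   -> [{"ReferenceId": "CCC.Core.TH01", "Identifiers": ["CCC.ObjStor.TH01"]}]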
def ar_prefix(ar_id: str) -> str:
"""Return the first three dot-segments of an AR id (the parent control).
e.g. ``CCC.Core.CN01.AR01`` -> ``CCC.Core.CN01``.
"""
return ".".join(ar_id.split(".")[:3])
def rewrite_ar_id(parent_control_id: str, original_ar_id: str, ar_index: int) -> str:
"""If an AR's id doesn't share its parent control's prefix, rename it.
Example
-------
parent ``CCC.Logging.CN03`` + AR id ``CCC.AuditLog.CN08.AR01`` with
index 0 -> ``CCC.Logging.CN03.AR01``.
"""
if ar_prefix(original_ar_id) == parent_control_id:
return original_ar_id
return f"{parent_control_id}.AR{ar_index + 1:02d}"
def emit_requirement(
control: dict,
family_name: str,
family_desc: str,
seen_ids: set[str],
requirements: list[dict],
) -> None:
"""Translate one FINOS control + its assessment-requirements into
Prowler-format requirement dicts and append them to ``requirements``.
Applies foreign-prefix rewriting and genuine-collision renumbering so
the final list is guaranteed to have unique ids.
"""
control_id = clean(control.get("id"))
control_title = clean(control.get("title"))
section = f"{control_id} {control_title}".strip()
objective = clean(control.get("objective"))
threat_mappings = flatten_mappings(control.get("threat-mappings"))
guideline_mappings = flatten_mappings(control.get("guideline-mappings"))
ars = control.get("assessment-requirements") or []
for idx, ar in enumerate(ars):
raw_id = clean(ar.get("id"))
if not raw_id:
continue
new_id = rewrite_ar_id(control_id, raw_id, idx)
# Renumber on genuine upstream collision (find next free AR number)
if new_id in seen_ids:
base = ".".join(new_id.split(".")[:-1])
n = 1
while f"{base}.AR{n:02d}" in seen_ids:
n += 1
new_id = f"{base}.AR{n:02d}"
seen_ids.add(new_id)
requirements.append(
{
"Id": new_id,
"Description": clean(ar.get("text")),
"Attributes": [
{
"FamilyName": family_name,
"FamilyDescription": family_desc,
"Section": section,
"SubSection": "",
"SubSectionObjective": objective,
"Applicability": list(ar.get("applicability") or []),
"Recommendation": clean(ar.get("recommendation")),
"SectionThreatMappings": threat_mappings,
"SectionGuidelineMappings": guideline_mappings,
}
],
"Checks": [],
}
)
# ---------------------------------------------------------------------------
# Public entry point
# ---------------------------------------------------------------------------
def parse_upstream(config: dict) -> list[dict]:
"""Walk upstream YAMLs and emit Prowler-format requirements.
Handles both top-level shapes (``control-families`` and ``controls``).
Ids are guaranteed unique in the returned list.
"""
upstream_dir = Path(config["upstream"]["dir"])
parser_cfg = config.get("parser") or {}
catalog_files = parser_cfg.get("catalog_files") or []
family_id_title = parser_cfg.get("family_id_title") or {}
family_id_description = parser_cfg.get("family_id_description") or {}
requirements: list[dict] = []
seen_ids: set[str] = set()
for filename in catalog_files:
path = upstream_dir / filename
if not path.exists():
# parser.catalog_files is the closed set of upstream catalogs
# that define the framework. Silently skipping a missing file
# would emit valid-looking JSON with part of the framework
# dropped, defeating the whole point of a canonical sync.
raise FileNotFoundError(
f"upstream catalog file not found: {path}\n"
f" hint: refresh the upstream cache (see SKILL.md Workflow A "
f"Step 1), or remove {filename!r} from parser.catalog_files "
f"if it has been retired upstream."
)
with open(path) as f:
doc = yaml.safe_load(f) or {}
# Shape 1: control-families wrapper
for family in doc.get("control-families") or []:
family_name = clean(family.get("title"))
family_desc = clean(family.get("description"))
for control in family.get("controls") or []:
emit_requirement(
control, family_name, family_desc, seen_ids, requirements
)
# Shape 2: top-level controls with family reference id
for control in doc.get("controls") or []:
family_ref = clean(control.get("family"))
suffix = family_ref.split(".")[-1] if family_ref else ""
family_name = family_id_title.get(suffix, suffix or "Data")
family_desc = family_id_description.get(suffix, "")
emit_requirement(
control, family_name, family_desc, seen_ids, requirements
)
return requirements
@@ -1,86 +0,0 @@
#!/usr/bin/env python3
"""
Keyword/service/id lookup over a Prowler check inventory produced by
build_inventory.py.
Usage:
# Keyword AND-search across id + title + risk + description
python skills/prowler-compliance/assets/query_checks.py aws encryption transit
# Show all checks for a service
python skills/prowler-compliance/assets/query_checks.py aws --service iam
# Show full metadata for one check id
python skills/prowler-compliance/assets/query_checks.py aws --id kms_cmk_rotation_enabled
"""
from __future__ import annotations
import json
import sys
def main() -> int:
if len(sys.argv) < 3:
print(__doc__)
return 1
provider = sys.argv[1]
try:
with open(f"/tmp/checks_{provider}.json") as f:
inv = json.load(f)
except FileNotFoundError:
print(
f"No inventory for {provider}. Run build_inventory.py first.",
file=sys.stderr,
)
return 2
if sys.argv[2] == "--service":
if len(sys.argv) < 4:
print("usage: --service <service_name>")
return 1
svc = sys.argv[3]
hits = [cid for cid in sorted(inv) if inv[cid].get("service") == svc]
for cid in hits:
print(f" {cid}")
print(f" {inv[cid].get('title', '')}")
print(f"\n{len(hits)} checks in service '{svc}'")
elif sys.argv[2] == "--id":
if len(sys.argv) < 4:
print("usage: --id <check_id>")
return 1
cid = sys.argv[3]
if cid not in inv:
print(f"NOT FOUND: {cid}")
return 3
m = inv[cid]
print(f"== {cid} ==")
print(f"service : {m.get('service')}")
print(f"severity: {m.get('severity')}")
print(f"resource: {m.get('resource')}")
print(f"title : {m.get('title')}")
print(f"desc : {m.get('description', '')[:500]}")
print(f"risk : {m.get('risk', '')[:500]}")
else:
keywords = [k.lower() for k in sys.argv[2:]]
hits = 0
for cid in sorted(inv):
m = inv[cid]
blob = " ".join(
[
cid,
m.get("title", ""),
m.get("risk", ""),
m.get("description", ""),
]
).lower()
if all(k in blob for k in keywords):
hits += 1
print(f" {cid} [{m.get('service', '')}]")
print(f" {m.get('title', '')[:120]}")
print(f"\n{hits} matches for {' + '.join(keywords)}")
return 0
if __name__ == "__main__":
sys.exit(main())
@@ -1,536 +0,0 @@
#!/usr/bin/env python3
"""
Generic, config-driven compliance framework sync runner.
Usage:
python skills/prowler-compliance/assets/sync_framework.py \
skills/prowler-compliance/assets/configs/ccc.yaml
Pipeline:
1. Load and validate the YAML config (fail fast on missing or empty
required fields, notably ``framework.version``, which silently
breaks ``get_check_compliance()`` key construction if empty).
2. Dynamically import the parser module declared in ``parser.module``
(resolved as ``parsers.{name}`` under this script's directory).
3. Call ``parser.parse_upstream(config) -> list[dict]`` to get raw
Prowler-format requirements. The parser owns all upstream-format
quirks (foreign-prefix AR rewriting, collision renumbering, shape
handling) and MUST return ids that are unique within the returned
list.
4. **Safety net**: assert id uniqueness. The runner raises
``ValueError`` on any duplicate; it does NOT silently renumber,
because mutating a canonical upstream id (e.g. CIS ``1.1.1`` or
NIST ``AC-2(1)``) would be catastrophic.
5. Apply generic ``FamilyName`` normalization from
``post_processing.family_name_normalization`` (optional).
6. Preserve legacy ``Checks`` lists from the existing Prowler JSON
using a config-driven primary key + fallback key chain. CCC uses
``(Section, Applicability)`` as fallback; CIS would use
``(Section, Profile)``; NIST would use ``(ItemId,)``.
For versioned frameworks (e.g. ``cis_<version>_<provider>.json``)
where a version bump writes to a brand-new file, set
``post_processing.check_preservation.legacy_path_template`` to
point at the previous version's file so its Checks are preserved
instead of silently lost. Defaults to ``output.path_template``
when omitted, which is correct for unversioned frameworks.
7. Wrap each provider's requirements in the framework metadata dict
built from the config templates.
8. Write each provider's JSON to the path resolved from
``output.path_template`` (supports ``{framework}``, ``{version}``
and ``{provider}`` placeholders).
9. Pydantic-validate the written JSON via ``Compliance.parse_file()``
and report the load counts per provider.
The runner is strictly generic: it never mentions CCC, knows nothing
about YAML shapes, and can handle any upstream-backed framework given a
parser module and a config file.
"""
from __future__ import annotations
import importlib
import json
import sys
from pathlib import Path
from typing import Any
import yaml
# Make sibling `parsers/` package importable regardless of the runner's
# invocation directory.
_SCRIPT_DIR = Path(__file__).resolve().parent
if str(_SCRIPT_DIR) not in sys.path:
sys.path.insert(0, str(_SCRIPT_DIR))
# ---------------------------------------------------------------------------
# Config loading and validation
# ---------------------------------------------------------------------------
class ConfigError(ValueError):
"""Raised when the sync config is malformed or missing required fields."""
def _require(cfg: dict, dotted_path: str) -> Any:
"""Fetch a dotted-path key from nested dicts. Raises ConfigError on
missing or empty values (empty-string, empty-list, None)."""
current: Any = cfg
parts = dotted_path.split(".")
for i, part in enumerate(parts):
if not isinstance(current, dict) or part not in current:
raise ConfigError(f"config: missing required field '{dotted_path}'")
current = current[part]
if current in ("", None, [], {}):
raise ConfigError(f"config: field '{dotted_path}' must not be empty")
return current
def load_config(path: Path) -> dict:
if not path.exists():
raise ConfigError(f"config file not found: {path}")
with open(path) as f:
cfg = yaml.safe_load(f) or {}
if not isinstance(cfg, dict):
raise ConfigError(f"config root must be a mapping, got {type(cfg).__name__}")
# Required fields — fail fast. Empty Version in particular silently
# breaks get_check_compliance() key construction.
_require(cfg, "framework.name")
_require(cfg, "framework.display_name")
_require(cfg, "framework.version")
_require(cfg, "framework.description_template")
_require(cfg, "providers")
_require(cfg, "output.path_template")
_require(cfg, "upstream.dir")
_require(cfg, "parser.module")
_require(cfg, "post_processing.check_preservation.primary_key")
providers = cfg["providers"]
if not isinstance(providers, list) or not providers:
raise ConfigError("config: 'providers' must be a non-empty list")
for idx, p in enumerate(providers):
if not isinstance(p, dict) or "key" not in p or "display" not in p:
raise ConfigError(
f"config: providers[{idx}] must have 'key' and 'display' fields"
)
return cfg
# ---------------------------------------------------------------------------
# Parser loading
# ---------------------------------------------------------------------------
def load_parser(parser_module_name: str):
try:
return importlib.import_module(f"parsers.{parser_module_name}")
except ImportError as exc:
raise ConfigError(
f"cannot import parser 'parsers.{parser_module_name}': {exc}"
) from exc
# ---------------------------------------------------------------------------
# Post-processing: id uniqueness safety net
# ---------------------------------------------------------------------------
def assert_unique_ids(requirements: list[dict]) -> None:
"""Enforce the parser contract: every requirement must have a unique Id.
The runner never renumbers silently; a duplicate is a parser bug.
"""
seen: set[str] = set()
dups: list[str] = []
for req in requirements:
rid = req.get("Id")
if not rid:
raise ValueError(f"requirement missing Id: {req}")
if rid in seen:
dups.append(rid)
seen.add(rid)
if dups:
raise ValueError(
f"parser returned duplicate requirement ids: {sorted(set(dups))}"
)
# ---------------------------------------------------------------------------
# Post-processing: FamilyName normalization
# ---------------------------------------------------------------------------
def normalize_family_names(requirements: list[dict], norm_map: dict[str, str]) -> None:
"""Apply ``Attributes[0].FamilyName`` normalization in place."""
if not norm_map:
return
for req in requirements:
for attr in req.get("Attributes") or []:
name = attr.get("FamilyName")
if name in norm_map:
attr["FamilyName"] = norm_map[name]
# ---------------------------------------------------------------------------
# Post-processing: legacy check-mapping preservation
# ---------------------------------------------------------------------------
def _freeze(value: Any) -> Any:
"""Make a value hashable for use in composite lookup keys.
Lists become frozensets (order-insensitive match). Scalars pass through.
"""
if isinstance(value, list):
return frozenset(value)
return value
def _build_fallback_key(attrs: dict, field_names: list[str]) -> tuple | None:
"""Build a composite tuple key from the given attribute field names.
Returns None if any field is missing or falsy; that key will be
skipped (the lookup table just won't have an entry for it).
"""
parts = []
for name in field_names:
if name not in attrs:
return None
value = attrs[name]
if value in ("", None, [], {}):
return None
parts.append(_freeze(value))
return tuple(parts)
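# Illustrative call with hypothetical attribute values: field_names
# ["Section", "Applicability"] over {"Section": "CCC.Core.CN01 Encryption",
# "Applicability": ["tlp-green"]} yields
# ("CCC.Core.CN01 Encryption", frozenset({"tlp-green"})).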
def load_legacy_check_maps(
legacy_path: Path,
primary_key: str,
fallback_keys: list[list[str]],
) -> tuple[dict[str, list[str]], list[dict[tuple, list[str]]]]:
"""Read the existing Prowler JSON and build lookup tables for check
preservation.
Fails fast on ambiguous preservation keys. If two distinct legacy
requirements share the same primary value or the same fallback tuple,
merging their ``Checks`` silently would corrupt the preserved mapping
for unrelated requirements. Raises ``ValueError`` listing every
conflict so the user can either dedupe the legacy data or strengthen
``check_preservation`` in the sync config.
Returns
-------
by_primary : dict
``{primary_value: [checks]}``, e.g. ``{ar_id: [checks]}``.
by_fallback : list[dict]
One lookup dict per entry in ``fallback_keys``. Each maps a
composite tuple key to its preserved checks list.
"""
by_primary: dict[str, list[str]] = {}
by_fallback: list[dict[tuple, list[str]]] = [{} for _ in fallback_keys]
if not legacy_path.exists():
return by_primary, by_fallback
with open(legacy_path) as f:
data = json.load(f)
# Track which legacy requirement Ids contributed to each bucket so we
# can surface ambiguity after the scan completes.
primary_sources: dict[str, list[str]] = {}
fallback_sources: list[dict[tuple, list[str]]] = [{} for _ in fallback_keys]
for req in data.get("Requirements") or []:
legacy_id = req.get("Id") or "<missing-Id>"
checks = req.get("Checks") or []
pv = req.get(primary_key)
if pv:
primary_sources.setdefault(pv, []).append(legacy_id)
bucket = by_primary.setdefault(pv, [])
for c in checks:
if c not in bucket:
bucket.append(c)
attributes = req.get("Attributes") or []
if not attributes:
continue
attrs = attributes[0]
for i, field_names in enumerate(fallback_keys):
key = _build_fallback_key(attrs, field_names)
if key is None:
continue
fallback_sources[i].setdefault(key, []).append(legacy_id)
bucket = by_fallback[i].setdefault(key, [])
for c in checks:
if c not in bucket:
bucket.append(c)
conflicts: list[str] = []
for pv, ids in primary_sources.items():
if len(ids) > 1:
conflicts.append(
f"primary_key={primary_key!r} value={pv!r} shared by {ids}"
)
for i, field_names in enumerate(fallback_keys):
for key, ids in fallback_sources[i].items():
if len(ids) > 1:
conflicts.append(
f"fallback_key={field_names} value={key!r} shared by {ids}"
)
if conflicts:
details = "\n - ".join(conflicts)
raise ValueError(
f"ambiguous preservation keys in {legacy_path} — cannot "
f"faithfully preserve Checks across distinct requirements:\n"
f" - {details}\n"
f"Fix: dedupe the legacy JSON, or strengthen "
f"'post_processing.check_preservation' in the sync config "
f"(e.g. add a more discriminating field to fallback_keys)."
)
return by_primary, by_fallback
def lookup_preserved_checks(
req: dict,
by_primary: dict,
by_fallback: list[dict],
primary_key: str,
fallback_keys: list[list[str]],
) -> list[str]:
"""Return preserved check ids for a requirement, trying the primary
key first then each fallback in order."""
pv = req.get(primary_key)
if pv and pv in by_primary:
return list(by_primary[pv])
attributes = req.get("Attributes") or []
if not attributes:
return []
attrs = attributes[0]
for i, field_names in enumerate(fallback_keys):
key = _build_fallback_key(attrs, field_names)
if key and key in by_fallback[i]:
return list(by_fallback[i][key])
return []
# ---------------------------------------------------------------------------
# Provider output assembly
# ---------------------------------------------------------------------------
def resolve_output_path(template: str, framework: dict, provider_key: str) -> Path:
return Path(
template.format(
provider=provider_key,
framework=framework["name"].lower(),
version=framework["version"],
)
)
def build_provider_json(
config: dict,
provider: dict,
base_requirements: list[dict],
) -> tuple[dict, dict[str, int]]:
"""Produce the provider-specific JSON dict ready to dump.
Returns ``(json_dict, counts)`` where ``counts`` tracks how each
requirement's checks were resolved (primary, fallback, or none).
"""
framework = config["framework"]
preservation = config["post_processing"]["check_preservation"]
primary_key = preservation["primary_key"]
fallback_keys = preservation.get("fallback_keys") or []
# For versioned frameworks, the file we WRITE (output.path_template
# resolved at the new version) is not the file we want to READ legacy
# Checks from. Allow the config to override the legacy source path so
# a version bump can still preserve mappings from the previous file.
legacy_template = (
preservation.get("legacy_path_template")
or config["output"]["path_template"]
)
legacy_path = resolve_output_path(
legacy_template, framework, provider["key"]
)
by_primary, by_fallback = load_legacy_check_maps(
legacy_path, primary_key, fallback_keys
)
counts = {"primary": 0, "fallback": 0, "none": 0}
enriched: list[dict] = []
for req in base_requirements:
# Try primary key first
pv = req.get(primary_key)
checks: list[str] = []
source = "none"
if pv and pv in by_primary:
checks = list(by_primary[pv])
source = "primary"
else:
attributes = req.get("Attributes") or []
if attributes:
attrs = attributes[0]
for i, field_names in enumerate(fallback_keys):
key = _build_fallback_key(attrs, field_names)
if key and key in by_fallback[i]:
checks = list(by_fallback[i][key])
source = "fallback"
break
counts[source] += 1
enriched.append(
{
"Id": req["Id"],
"Description": req["Description"],
# Shallow-copy attribute dicts so providers don't share refs
"Attributes": [dict(a) for a in req.get("Attributes") or []],
"Checks": checks,
}
)
description = framework["description_template"].format(
provider_display=provider["display"],
provider_key=provider["key"],
framework_name=framework["name"],
framework_display=framework["display_name"],
version=framework["version"],
)
out = {
"Framework": framework["name"],
"Version": framework["version"],
"Provider": provider["display"],
"Name": framework["display_name"],
"Description": description,
"Requirements": enriched,
}
return out, counts
# ---------------------------------------------------------------------------
# Pydantic post-validation
# ---------------------------------------------------------------------------
def pydantic_validate(json_path: Path) -> int:
"""Import Prowler lazily so the runner still works without Prowler
installed (validation step is skipped in that case)."""
try:
from prowler.lib.check.compliance_models import Compliance
except ImportError:
print(
" note: prowler package not importable — skipping Pydantic validation",
file=sys.stderr,
)
return -1
try:
parsed = Compliance.parse_file(str(json_path))
except Exception as exc:
raise RuntimeError(
f"Pydantic validation failed for {json_path}: {exc}"
) from exc
return len(parsed.Requirements)
# ---------------------------------------------------------------------------
# Driver
# ---------------------------------------------------------------------------
def main() -> int:
if len(sys.argv) != 2:
print("usage: sync_framework.py <config.yaml>", file=sys.stderr)
return 1
config_path = Path(sys.argv[1])
try:
config = load_config(config_path)
except ConfigError as exc:
print(f"config error: {exc}", file=sys.stderr)
return 2
framework_name = config["framework"]["name"]
upstream_dir = Path(config["upstream"]["dir"])
if not upstream_dir.exists():
print(
f"error: upstream cache dir {upstream_dir} not found\n"
f" hint: {config['upstream'].get('fetch_docs', '(see SKILL.md Workflow A Step 1)')}",
file=sys.stderr,
)
return 3
parser_module_name = config["parser"]["module"]
print(
f"Sync: framework={framework_name} version={config['framework']['version']} "
f"parser={parser_module_name}"
)
try:
parser = load_parser(parser_module_name)
except ConfigError as exc:
print(f"parser error: {exc}", file=sys.stderr)
return 4
print(f"Parsing upstream from {upstream_dir}...")
try:
base_requirements = parser.parse_upstream(config)
except FileNotFoundError as exc:
# A missing catalog declared in parser.catalog_files is a hard
# failure: emitting JSON with part of the framework silently
# dropped would violate the canonical-sync contract.
print(f"upstream error: {exc}", file=sys.stderr)
return 6
print(f" parser returned {len(base_requirements)} requirements")
# Safety-net: parser contract
try:
assert_unique_ids(base_requirements)
except ValueError as exc:
print(f"parser contract violation: {exc}", file=sys.stderr)
return 5
# Post-processing: family name normalization
norm_map = (
config.get("post_processing", {})
.get("family_name_normalization")
or {}
)
normalize_family_names(base_requirements, norm_map)
# Per-provider output
print()
for provider in config["providers"]:
provider_json, counts = build_provider_json(
config, provider, base_requirements
)
out_path = resolve_output_path(
config["output"]["path_template"],
config["framework"],
provider["key"],
)
out_path.parent.mkdir(parents=True, exist_ok=True)
with open(out_path, "w") as f:
json.dump(provider_json, f, indent=2, ensure_ascii=False)
f.write("\n")
validated = pydantic_validate(out_path)
validated_msg = (
f" pydantic_reqs={validated}" if validated >= 0 else " pydantic=skipped"
)
print(
f" {provider['key']}: total={len(provider_json['Requirements'])} "
f"matched_primary={counts['primary']} "
f"matched_fallback={counts['fallback']} "
f"new_or_unmatched={counts['none']}{validated_msg}"
)
print(f" wrote {out_path}")
print("\nDone.")
return 0
if __name__ == "__main__":
sys.exit(main())
+12 -50
View File
@@ -1,10 +1,10 @@
#!/bin/bash
# Setup AI Skills for Prowler development
# Configures AI coding assistants that follow agentskills.io standard:
# - Claude Code: .claude/skills/ symlink + CLAUDE.md symlink
# - Gemini CLI: .gemini/skills/ symlink + GEMINI.md symlink
# - Claude Code: .claude/skills/ symlink + CLAUDE.md copies
# - Gemini CLI: .gemini/skills/ symlink + GEMINI.md copies
# - Codex (OpenAI): .codex/skills/ symlink + AGENTS.md (native)
# - GitHub Copilot: .github/copilot-instructions.md symlink
# - GitHub Copilot: .github/copilot-instructions.md copy
#
# Usage:
# ./setup.sh # Interactive mode (select AI assistants)
@@ -37,28 +37,6 @@ SETUP_COPILOT=false
# HELPER FUNCTIONS
# =============================================================================
add_to_gitignore() {
local pattern="$1"
local gitignore_file="$REPO_ROOT/.gitignore"
local header="# AI Coding assistants assets"
# Create .gitignore if it doesn't exist
if [ ! -f "$gitignore_file" ]; then
touch "$gitignore_file"
fi
# Check if pattern exists (exact match or at end of file)
if ! grep -qxF "$pattern" "$gitignore_file"; then
# Check if header exists
if ! grep -qxF "$header" "$gitignore_file"; then
echo -e "\n\n$header" >> "$gitignore_file"
fi
echo "$pattern" >> "$gitignore_file"
echo -e "${GREEN} ✓ Added $pattern to .gitignore${NC}"
fi
}
show_help() {
echo "Usage: $0 [OPTIONS]"
echo ""
@@ -131,7 +109,6 @@ setup_claude() {
if [ ! -d "$REPO_ROOT/.claude" ]; then
mkdir -p "$REPO_ROOT/.claude"
fi
add_to_gitignore ".claude/skills"
if [ -L "$target" ]; then
rm "$target"
@@ -142,9 +119,8 @@ setup_claude() {
ln -s "$SKILLS_SOURCE" "$target"
echo -e "${GREEN} ✓ .claude/skills -> skills/${NC}"
# Link AGENTS.md to CLAUDE.md
link_agents_md "CLAUDE.md"
add_to_gitignore "CLAUDE.md"
# Copy AGENTS.md to CLAUDE.md
copy_agents_md "CLAUDE.md"
}
setup_gemini() {
@@ -153,7 +129,6 @@ setup_gemini() {
if [ ! -d "$REPO_ROOT/.gemini" ]; then
mkdir -p "$REPO_ROOT/.gemini"
fi
add_to_gitignore ".gemini/skills"
if [ -L "$target" ]; then
rm "$target"
@@ -164,9 +139,8 @@ setup_gemini() {
ln -s "$SKILLS_SOURCE" "$target"
echo -e "${GREEN} ✓ .gemini/skills -> skills/${NC}"
# Link AGENTS.md to GEMINI.md
link_agents_md "GEMINI.md"
add_to_gitignore "GEMINI.md"
# Copy AGENTS.md to GEMINI.md
copy_agents_md "GEMINI.md"
}
setup_codex() {
@@ -175,7 +149,6 @@ setup_codex() {
if [ ! -d "$REPO_ROOT/.codex" ]; then
mkdir -p "$REPO_ROOT/.codex"
fi
add_to_gitignore ".codex/skills"
if [ -L "$target" ]; then
rm "$target"
@@ -191,19 +164,12 @@ setup_codex() {
setup_copilot() {
if [ -f "$REPO_ROOT/AGENTS.md" ]; then
mkdir -p "$REPO_ROOT/.github"
# Link AGENTS.md -> .github/copilot-instructions.md
local target="$REPO_ROOT/.github/copilot-instructions.md"
ln -sf "../AGENTS.md" "$target"
cp "$REPO_ROOT/AGENTS.md" "$REPO_ROOT/.github/copilot-instructions.md"
echo -e "${GREEN} ✓ AGENTS.md -> .github/copilot-instructions.md${NC}"
# Add specifically the file, NOT the .github folder
add_to_gitignore ".github/copilot-instructions.md"
fi
}
link_agents_md() {
copy_agents_md() {
local target_name="$1"
local agents_files
local count=0
@@ -213,15 +179,11 @@ link_agents_md() {
for agents_file in $agents_files; do
local agents_dir
agents_dir=$(dirname "$agents_file")
# Create relative symlink
# Since files are in same dir, we can just link to basename
(cd "$agents_dir" && ln -sf "$(basename "$agents_file")" "$target_name")
cp "$agents_file" "$agents_dir/$target_name"
count=$((count + 1))
done
echo -e "${GREEN}Linked $count AGENTS.md -> $target_name${NC}"
echo -e "${GREEN}Copied $count AGENTS.md -> $target_name${NC}"
}
# =============================================================================
@@ -340,4 +302,4 @@ echo "Configured:"
[ "$SETUP_COPILOT" = true ] && echo " • GitHub Copilot: .github/copilot-instructions.md"
echo ""
echo -e "${BLUE}Note: Restart your AI assistant to load the skills.${NC}"
echo -e "${BLUE} AGENTS.md is the source of truth - changes are reflected automatically via symlinks.${NC}"
echo -e "${BLUE} AGENTS.md is the source of truth - edit it, then re-run this script.${NC}"
+17 -17
View File
@@ -201,40 +201,40 @@ test_symlink_not_created_without_flag() {
}
# =============================================================================
# TESTS: AGENTS.md LINKING
# TESTS: AGENTS.md COPYING
# =============================================================================
test_link_claude_agents_md() {
test_copy_claude_agents_md() {
run_setup --claude > /dev/null
assert_symlink_exists "$TEST_DIR/CLAUDE.md" "Root CLAUDE.md should be a symlink" && \
assert_symlink_exists "$TEST_DIR/api/CLAUDE.md" "api/CLAUDE.md should be a symlink" && \
assert_symlink_exists "$TEST_DIR/ui/CLAUDE.md" "ui/CLAUDE.md should be a symlink"
assert_file_exists "$TEST_DIR/CLAUDE.md" "Root CLAUDE.md should exist" && \
assert_file_exists "$TEST_DIR/api/CLAUDE.md" "api/CLAUDE.md should exist" && \
assert_file_exists "$TEST_DIR/ui/CLAUDE.md" "ui/CLAUDE.md should exist"
}
test_link_gemini_agents_md() {
test_copy_gemini_agents_md() {
run_setup --gemini > /dev/null
assert_symlink_exists "$TEST_DIR/GEMINI.md" "Root GEMINI.md should be a symlink" && \
assert_symlink_exists "$TEST_DIR/api/GEMINI.md" "api/GEMINI.md should be a symlink" && \
assert_symlink_exists "$TEST_DIR/ui/GEMINI.md" "ui/GEMINI.md should be a symlink"
assert_file_exists "$TEST_DIR/GEMINI.md" "Root GEMINI.md should exist" && \
assert_file_exists "$TEST_DIR/api/GEMINI.md" "api/GEMINI.md should exist" && \
assert_file_exists "$TEST_DIR/ui/GEMINI.md" "ui/GEMINI.md should exist"
}
test_link_copilot_to_github() {
test_copy_copilot_to_github() {
run_setup --copilot > /dev/null
assert_symlink_exists "$TEST_DIR/.github/copilot-instructions.md" "Copilot instructions should be a symlink"
assert_file_exists "$TEST_DIR/.github/copilot-instructions.md" "Copilot instructions should exist"
}
test_link_codex_no_extra_files() {
test_copy_codex_no_extra_files() {
run_setup --codex > /dev/null
assert_file_not_exists "$TEST_DIR/CODEX.md" "CODEX.md should not be created"
}
test_link_not_created_without_flag() {
test_copy_not_created_without_flag() {
run_setup --codex > /dev/null
assert_symlink_not_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should not exist" && \
assert_symlink_not_exists "$TEST_DIR/GEMINI.md" "GEMINI.md should not exist"
assert_file_not_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should not exist" && \
assert_file_not_exists "$TEST_DIR/GEMINI.md" "GEMINI.md should not exist"
}
test_link_content_matches_source() {
test_copy_content_matches_source() {
run_setup --claude > /dev/null
local source_content target_content
source_content=$(cat "$TEST_DIR/AGENTS.md")
@@ -272,7 +272,7 @@ test_idempotent_multiple_runs() {
run_setup --claude > /dev/null
run_setup --claude > /dev/null
assert_symlink_exists "$TEST_DIR/.claude/skills" "Symlink should still exist after second run" && \
assert_symlink_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should still be a symlink after second run"
assert_file_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should still exist after second run"
}
# =============================================================================
File diff suppressed because it is too large Load Diff
@@ -120,7 +120,7 @@ class TestCloudTrailTimeline:
assert result[0]["event_name"] == "RunInstances"
def test_get_resource_timeline_prefers_uid_over_id(self, mock_session):
"""When both resource_id and resource_uid are provided, UID is tried first."""
"""When both resource_id and resource_uid are provided, UID should be used."""
mock_client = MagicMock()
mock_client.lookup_events.return_value = {"Events": []}
mock_session.client.return_value = mock_client
@@ -132,9 +132,9 @@ class TestCloudTrailTimeline:
resource_uid="arn:aws:ec2:us-east-1:123:instance/i-1234",
)
# Verify UID was used on the first lookup call
first_call = mock_client.lookup_events.call_args_list[0]
lookup_attrs = first_call.kwargs["LookupAttributes"]
# Verify UID was used in the lookup
call_args = mock_client.lookup_events.call_args
lookup_attrs = call_args.kwargs["LookupAttributes"]
assert (
lookup_attrs[0]["AttributeValue"]
== "arn:aws:ec2:us-east-1:123:instance/i-1234"
@@ -606,159 +606,3 @@ class TestIsReadOnlyEvent:
"""Verify write events are not marked as read-only."""
timeline = CloudTrailTimeline(session=mock_session)
assert timeline._is_read_only_event(event_name) is False
class TestExtractShortName:
"""Tests for _extract_short_name static method."""
@pytest.mark.parametrize(
"identifier,expected",
[
("arn:aws:s3:::my-bucket", "my-bucket"),
("arn:aws:iam::123456789012:user/alice", "alice"),
("arn:aws:iam::123456789012:role/MyRole", "MyRole"),
(
"arn:aws:ec2:us-east-1:123456789012:instance/i-0abc1234",
"i-0abc1234",
),
(
"arn:aws:lambda:us-east-1:123456789012:function:my-func",
"my-func",
),
("arn:aws:rds:us-east-1:123456789012:db:mydb", "mydb"),
("arn:aws:dynamodb:us-east-1:123456789012:table/MyTable", "MyTable"),
(
"arn:aws:kms:us-east-1:123456789012:key/abcd-efgh",
"abcd-efgh",
),
("i-0abc1234", "i-0abc1234"),
("my-bucket", "my-bucket"),
("", ""),
],
)
def test_extract_short_name(self, identifier, expected):
assert CloudTrailTimeline._extract_short_name(identifier) == expected
class TestLookupEventsFallback:
"""Tests for the ARN-to-short-name fallback in _lookup_events."""
@pytest.fixture
def mock_session(self):
return MagicMock()
@pytest.fixture
def sample_event(self):
return {
"EventId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
"EventTime": datetime(2024, 1, 15, 10, 30, 0, tzinfo=timezone.utc),
"EventName": "CreateBucket",
"EventSource": "s3.amazonaws.com",
"CloudTrailEvent": json.dumps(
{
"userIdentity": {
"type": "IAMUser",
"arn": "arn:aws:iam::123456789012:user/admin",
"userName": "admin",
}
}
),
}
def test_no_fallback_when_arn_returns_events(self, mock_session, sample_event):
"""When the ARN lookup returns events, we do not retry with the short name."""
mock_client = MagicMock()
mock_client.lookup_events.return_value = {"Events": [sample_event]}
mock_session.client.return_value = mock_client
timeline = CloudTrailTimeline(session=mock_session)
result = timeline.get_resource_timeline(
region="us-east-1",
resource_uid="arn:aws:kms:us-east-1:123456789012:key/abcd-efgh",
)
assert len(result) == 1
assert mock_client.lookup_events.call_count == 1
call = mock_client.lookup_events.call_args
assert (
call.kwargs["LookupAttributes"][0]["AttributeValue"]
== "arn:aws:kms:us-east-1:123456789012:key/abcd-efgh"
)
def test_fallback_to_short_name_when_arn_returns_empty(
self, mock_session, sample_event
):
"""When the ARN lookup returns nothing, we retry with the short name."""
mock_client = MagicMock()
mock_client.lookup_events.side_effect = [
{"Events": []},
{"Events": [sample_event]},
]
mock_session.client.return_value = mock_client
timeline = CloudTrailTimeline(session=mock_session)
result = timeline.get_resource_timeline(
region="us-east-1", resource_uid="arn:aws:s3:::my-bucket"
)
assert len(result) == 1
assert mock_client.lookup_events.call_count == 2
first_call, second_call = mock_client.lookup_events.call_args_list
assert (
first_call.kwargs["LookupAttributes"][0]["AttributeValue"]
== "arn:aws:s3:::my-bucket"
)
assert (
second_call.kwargs["LookupAttributes"][0]["AttributeValue"] == "my-bucket"
)
def test_no_fallback_when_identifier_has_no_short_name(self, mock_session):
"""A non-ARN identifier collapses to itself; no retry should fire."""
mock_client = MagicMock()
mock_client.lookup_events.return_value = {"Events": []}
mock_session.client.return_value = mock_client
timeline = CloudTrailTimeline(session=mock_session)
result = timeline.get_resource_timeline(
region="us-east-1", resource_id="i-0abc1234"
)
assert result == []
assert mock_client.lookup_events.call_count == 1
def test_no_fallback_when_identifier_is_not_arn(self, mock_session):
"""A non-ARN identifier with / or : must not trigger the retry."""
mock_client = MagicMock()
mock_client.lookup_events.return_value = {"Events": []}
mock_session.client.return_value = mock_client
timeline = CloudTrailTimeline(session=mock_session)
result = timeline.get_resource_timeline(
region="us-east-1", resource_id="some-prefix/weird:value"
)
assert result == []
assert mock_client.lookup_events.call_count == 1
def test_both_lookups_empty_returns_empty_list(self, mock_session):
"""If both the ARN and short-name lookups return empty, we return []."""
mock_client = MagicMock()
mock_client.lookup_events.return_value = {"Events": []}
mock_session.client.return_value = mock_client
timeline = CloudTrailTimeline(session=mock_session)
result = timeline.get_resource_timeline(
region="us-east-1",
resource_uid="arn:aws:ec2:us-east-1:123456789012:instance/i-0abc1234",
)
assert result == []
assert mock_client.lookup_events.call_count == 2
first_call, second_call = mock_client.lookup_events.call_args_list
assert (
first_call.kwargs["LookupAttributes"][0]["AttributeValue"]
== "arn:aws:ec2:us-east-1:123456789012:instance/i-0abc1234"
)
assert (
second_call.kwargs["LookupAttributes"][0]["AttributeValue"] == "i-0abc1234"
)
@@ -433,29 +433,6 @@ class TestCloudflareValidateCredentials:
with pytest.raises(CloudflareNoAccountsError):
CloudflareProvider.validate_credentials(session)
def test_validate_credentials_breaks_on_repeated_account_ids(self):
"""Pagination must stop when the SDK repeats account IDs to avoid infinite loops."""
def repeating_accounts():
account = MagicMock()
account.id = ACCOUNT_ID
while True:
yield account
mock_client = MagicMock()
mock_client.user.get.side_effect = Exception("Some other error")
mock_client.accounts.list.return_value = repeating_accounts()
session = CloudflareSession(
client=mock_client,
api_token=API_TOKEN,
api_key=None,
api_email=None,
)
# Must return without hanging; repeated IDs break the loop.
CloudflareProvider.validate_credentials(session)
class TestCloudflareTestConnection:
"""Tests for test_connection method."""
@@ -12,8 +12,6 @@ from prowler.providers.github.exceptions.exceptions import (
GithubInvalidCredentialsError,
GithubInvalidProviderIdError,
GithubInvalidTokenError,
GithubRepoListFileNotFoundError,
GithubRepoListFileReadError,
GithubSetUpIdentityError,
GithubSetUpSessionError,
)
@@ -710,81 +708,3 @@ class Test_GithubProvider_Scoping:
assert provider_none.repositories == []
assert provider_none.organizations == []
class TestGitHubProviderLoadReposFromFile:
"""Tests for GithubProvider._load_repos_from_file"""
def _make_provider(self):
"""Create a GithubProvider instance with mocked session/identity."""
with (
patch(
"prowler.providers.github.github_provider.GithubProvider.setup_session",
return_value=GithubSession(token=PAT_TOKEN, id="", key=""),
),
patch(
"prowler.providers.github.github_provider.GithubProvider.setup_identity",
return_value=GithubIdentityInfo(
account_id=ACCOUNT_ID,
account_name=ACCOUNT_NAME,
account_url=ACCOUNT_URL,
),
),
):
provider = GithubProvider(
personal_access_token=PAT_TOKEN,
)
return provider
def test_load_repos_from_file_happy_path(self, tmp_path):
provider = self._make_provider()
repo_file = tmp_path / "repos.txt"
repo_file.write_text("owner/repo-a\nowner/repo-b\nowner/repo-c\n")
provider._load_repos_from_file(str(repo_file))
assert "owner/repo-a" in provider.repositories
assert "owner/repo-b" in provider.repositories
assert "owner/repo-c" in provider.repositories
def test_load_repos_from_file_comments_and_blanks(self, tmp_path):
provider = self._make_provider()
repo_file = tmp_path / "repos.txt"
repo_file.write_text(
"# This is a comment\n"
"\n"
"owner/repo-a\n"
" # Another comment\n"
" \n"
"owner/repo-b\n"
)
provider._load_repos_from_file(str(repo_file))
assert provider.repositories == ["owner/repo-a", "owner/repo-b"]
def test_load_repos_from_file_not_found(self):
provider = self._make_provider()
with pytest.raises(GithubRepoListFileNotFoundError):
provider._load_repos_from_file("/nonexistent/path/repos.txt")
def test_load_repos_from_file_exceeds_max_lines(self, tmp_path):
provider = self._make_provider()
repo_file = tmp_path / "repos.txt"
# Write MAX_REPO_LIST_LINES + 1 lines to trigger the guard
lines = [f"owner/repo-{i}" for i in range(provider.MAX_REPO_LIST_LINES + 1)]
repo_file.write_text("\n".join(lines) + "\n")
with pytest.raises(GithubRepoListFileReadError):
provider._load_repos_from_file(str(repo_file))
def test_load_repos_from_file_skips_long_names(self, tmp_path):
provider = self._make_provider()
repo_file = tmp_path / "repos.txt"
long_name = "a" * (provider.MAX_REPO_NAME_LENGTH + 1)
repo_file.write_text(f"owner/valid-repo\n{long_name}\nowner/also-valid\n")
provider._load_repos_from_file(str(repo_file))
assert provider.repositories == ["owner/valid-repo", "owner/also-valid"]
@@ -82,14 +82,13 @@ class Test_GitHubArguments:
arguments.init_parser(mock_github_args)
# Verify scoping arguments were added
assert self.mock_scoping_group.add_argument.call_count == 3
assert self.mock_scoping_group.add_argument.call_count == 2
# Check that all scoping arguments are present
calls = self.mock_scoping_group.add_argument.call_args_list
scoping_args = [call[0][0] for call in calls]
assert "--repository" in scoping_args
assert "--repo-list-file" in scoping_args
assert "--organization" in scoping_args
def test_repository_argument_configuration(self):
@@ -278,33 +277,6 @@ class Test_GitHubArguments_Integration:
assert args.repository == ["owner1/repo1"]
assert args.organization == ["org1"]
def test_real_argument_parsing_with_repo_list_file(self):
"""Test parsing arguments with repo-list-file scoping"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
common_parser = argparse.ArgumentParser(add_help=False)
mock_github_args = MagicMock()
mock_github_args.subparsers = subparsers
mock_github_args.common_providers_parser = common_parser
arguments.init_parser(mock_github_args)
# Parse arguments with repo-list-file
args = parser.parse_args(
[
"github",
"--personal-access-token",
"test-token",
"--repo-list-file",
"/path/to/repos.txt",
]
)
assert args.personal_access_token == "test-token"
assert args.repo_list_file == "/path/to/repos.txt"
assert args.repository is None
def test_real_argument_parsing_empty_scoping(self):
"""Test parsing arguments with empty scoping values"""
parser = argparse.ArgumentParser()
+1 -1
View File
@@ -13,5 +13,5 @@ README.md
!.next/static
!.next/standalone
.git
.pre-commit-config.yaml
.husky
scripts/setup-git-hooks.js
-37
View File
@@ -1,37 +0,0 @@
orphan: true
repos:
- repo: local
hooks:
- id: ui-typecheck
name: UI - TypeScript Check
entry: pnpm run typecheck
language: system
files: '\.(ts|tsx|js|jsx)$'
pass_filenames: false
priority: 0
- id: ui-lint
name: UI - ESLint
entry: pnpm run lint:check
language: system
files: '\.(ts|tsx|js|jsx)$'
pass_filenames: false
priority: 0
- id: ui-tests
name: UI - Unit Tests
entry: pnpm exec vitest related --run
language: system
files: '\.(ts|tsx|js|jsx)$'
exclude: '\.test\.|\.spec\.|vitest\.config|vitest\.setup'
pass_filenames: true
priority: 1
- id: ui-build
name: UI - Build
entry: pnpm run build
language: system
files: '\.(ts|tsx|js|jsx|json|css)$'
pass_filenames: false
priority: 2
+1 -26
View File
@@ -2,37 +2,12 @@
All notable changes to the **Prowler UI** are documented in this file.
## [1.25.0] (Prowler UNRELEASED)
### 🔄 Changed
- Redesign compliance page with a horizontal ThreatScore card (always-visible pillar breakdown + ActionDropdown), client-side search for compliance frameworks, compact scan selector trigger, responsive mobile filters, download-started toasts for CSV/PDF exports, enhanced compliance cards with truncated titles, and Alert-based empty/error states; migrate Progress component from HeroUI to shadcn [(#10767)](https://github.com/prowler-cloud/prowler/pull/10767)
- Backward-compatibility middleware redirect from `/sign-up?invitation_token=…` to `/invitation/accept?invitation_token=…`; new invitation emails use `/invitation/accept` directly [(#10797)](https://github.com/prowler-cloud/prowler/pull/10797)
---
## [1.24.2] (Prowler v5.24.2)
### 🐞 Fixed
- Default muted filter now applied consistently on the findings page and the finding-group resource drill-down, keeping muted findings hidden unless the "include muted findings" checkbox is opted in [(#10818)](https://github.com/prowler-cloud/prowler/pull/10818)
---
## [1.24.1] (Prowler v5.24.1)
### 🐞 Fixed
- Findings and filter UX fixes: exclude muted findings by default in the resource detail drawer and finding group resource views, show category context label (for example `Status: FAIL`) on MultiSelect triggers instead of hiding the placeholder, and add a `wide` width option for filter dropdowns applied to the findings Scan filter to prevent label truncation [(#10734)](https://github.com/prowler-cloud/prowler/pull/10734)
- Findings grouped view now handles zero-resource IaC counters, refines drawer loading states, and adds provider indicators to finding groups [(#10736)](https://github.com/prowler-cloud/prowler/pull/10736)
- Other Findings for this resource: ordering by `severity` [(#10778)](https://github.com/prowler-cloud/prowler/pull/10778)
- Other Findings for this resource: show `delta` indicator [(#10778)](https://github.com/prowler-cloud/prowler/pull/10778)
- Compliance: requirement findings do not show muted findings [(#10778)](https://github.com/prowler-cloud/prowler/pull/10778)
- Latest new findings: link to finding groups ordered by `-severity,-last_seen_at` [(#10778)](https://github.com/prowler-cloud/prowler/pull/10778)
### 🔒 Security
- Upgrade React to 19.2.5 and Next.js to 16.2.3 to mitigate CVE-2026-23869 (React2DoS), a high-severity unauthenticated remote DoS vulnerability in the React Flight Protocol's Server Function deserialization [(#10754)](https://github.com/prowler-cloud/prowler/pull/10754)
- Disable Next.js 16 Server Function argument logging to prevent sign-in credentials (email/password) from being printed to the terminal during development [(#10760)](https://github.com/prowler-cloud/prowler/pull/10760)
---
+2 -2
View File
@@ -85,10 +85,10 @@ git clone git@github.com:prowler-cloud/ui.git
pnpm install
```
**Note:** The `pnpm install` command will automatically configure prek Git hooks for code quality checks. If hooks are not installed, run from the repo root:
**Note:** The `pnpm install` command will automatically configure Git hooks for code quality checks. If you experience issues, you can manually configure them:
```bash
prek install
git config core.hooksPath "ui/.husky"
```
#### Run the development server
+7 -4
View File
@@ -6,10 +6,12 @@ import { handleApiResponse } from "@/lib/server-actions-helper";
export const getCompliancesOverview = async ({
scanId,
region,
query,
filters = {},
}: {
scanId?: string;
region?: string | string[];
query?: string;
filters?: Record<string, string | string[] | undefined>;
} = {}) => {
const headers = await getAuthHeaders({ contentType: false });
@@ -29,6 +31,8 @@ export const getCompliancesOverview = async ({
setParam("filter[scan_id]", scanId);
setParam("filter[region__in]", region);
if (query) url.searchParams.set("filter[search]", query);
try {
const response = await fetch(url.toString(), {
headers,
@@ -42,16 +46,15 @@ export const getCompliancesOverview = async ({
};
export const getComplianceOverviewMetadataInfo = async ({
query = "",
sort = "",
filters = {},
}: {
sort?: string;
filters?: Record<string, string | string[] | undefined>;
} = {}) => {
}) => {
const headers = await getAuthHeaders({ contentType: false });
const url = new URL(`${apiBaseUrl}/compliance-overviews/metadata`);
if (query) url.searchParams.append("filter[search]", query);
if (sort) url.searchParams.append("sort", sort);
Object.entries(filters).forEach(([key, value]) => {
@@ -115,35 +115,4 @@ describe("adaptFindingsByResourceResponse — malformed input", () => {
expect(result[0].id).toBe("finding-1");
expect(result[0].checkId).toBe("s3_check");
});
it("should normalize a single finding response into a one-item drawer array", () => {
// Given — getFindingById returns a single JSON:API resource object
const input = {
data: {
id: "finding-1",
attributes: {
uid: "uid-1",
check_id: "s3_check",
status: "FAIL",
severity: "critical",
check_metadata: {
checktitle: "S3 Check",
},
},
relationships: {
resources: { data: [] },
scan: { data: null },
},
},
included: [],
};
// When
const result = adaptFindingsByResourceResponse(input);
// Then
expect(result).toHaveLength(1);
expect(result[0].id).toBe("finding-1");
expect(result[0].checkTitle).toBe("S3 Check");
});
});
@@ -165,18 +165,16 @@ type IncludedDict = Record<string, IncludedItem>;
* then resolves each finding's resource and provider relationships.
*/
interface JsonApiResponse {
data: FindingApiItem | FindingApiItem[];
data: FindingApiItem[];
included?: Record<string, unknown>[];
}
function isJsonApiResponse(value: unknown): value is JsonApiResponse {
const data = (value as { data?: unknown })?.data;
return (
value !== null &&
typeof value === "object" &&
"data" in value &&
(Array.isArray(data) || (data !== null && typeof data === "object"))
Array.isArray((value as { data: unknown }).data)
);
}
@@ -190,11 +188,8 @@ export function adaptFindingsByResourceResponse(
const resourcesDict = createDict("resources", apiResponse) as IncludedDict;
const scansDict = createDict("scans", apiResponse) as IncludedDict;
const providersDict = createDict("providers", apiResponse) as IncludedDict;
const findings = Array.isArray(apiResponse.data)
? apiResponse.data
: [apiResponse.data];
return findings.map((item) => {
return apiResponse.data.map((item) => {
const attrs = item.attributes;
const meta = (attrs.check_metadata || {}) as Record<string, unknown>;
const remediationRaw = meta.remediation as
@@ -43,7 +43,6 @@ vi.mock("@/actions/finding-groups", () => ({
}));
import {
getLatestFindingsByResourceUid,
resolveFindingIdsByCheckIds,
resolveFindingIdsByVisibleGroupResources,
} from "./findings-by-resource";
@@ -263,46 +262,3 @@ describe("resolveFindingIdsByVisibleGroupResources", () => {
expect(fetchMock).not.toHaveBeenCalled();
});
});
describe("getLatestFindingsByResourceUid", () => {
beforeEach(() => {
vi.clearAllMocks();
vi.stubGlobal("fetch", fetchMock);
getAuthHeadersMock.mockResolvedValue({ Authorization: "Bearer token" });
handleApiResponseMock.mockResolvedValue({ data: [] });
});
it("should restrict to FAIL, exclude muted findings, and apply severity/time sorting by default", async () => {
fetchMock.mockResolvedValue(new Response("", { status: 200 }));
await getLatestFindingsByResourceUid({
resourceUid: "resource-1",
});
const calledUrl = new URL(fetchMock.mock.calls[0][0]);
expect(calledUrl.pathname).toBe("/api/v1/findings/latest");
expect(calledUrl.searchParams.get("filter[resource_uid]")).toBe(
"resource-1",
);
// Status filter is applied server-side so the page[size]=50 window
// always holds FAIL rows — guards against PASS-heavy resources
// starving FAILs out of the result.
expect(calledUrl.searchParams.get("filter[status]")).toBe("FAIL");
expect(calledUrl.searchParams.get("filter[muted]")).toBe("false");
expect(calledUrl.searchParams.get("sort")).toBe("severity,-updated_at");
});
it("should include muted findings only when explicitly requested", async () => {
fetchMock.mockResolvedValue(new Response("", { status: 200 }));
await getLatestFindingsByResourceUid({
resourceUid: "resource-1",
includeMuted: true,
});
const calledUrl = new URL(fetchMock.mock.calls[0][0]);
expect(calledUrl.searchParams.get("filter[status]")).toBe("FAIL");
expect(calledUrl.searchParams.get("filter[muted]")).toBe("include");
expect(calledUrl.searchParams.get("sort")).toBe("severity,-updated_at");
});
});
+2 -5
View File
@@ -250,12 +250,10 @@ export const getLatestFindingsByResourceUid = async ({
resourceUid,
page = 1,
pageSize = 50,
includeMuted = false,
}: {
resourceUid: string;
page?: number;
pageSize?: number;
includeMuted?: boolean;
}) => {
const headers = await getAuthHeaders({ contentType: false });
@@ -264,9 +262,8 @@ export const getLatestFindingsByResourceUid = async ({
);
url.searchParams.append("filter[resource_uid]", resourceUid);
url.searchParams.append("filter[status]", "FAIL");
url.searchParams.append("filter[muted]", includeMuted ? "include" : "false");
url.searchParams.append("sort", "severity,-updated_at");
url.searchParams.append("filter[muted]", "include");
url.searchParams.append("sort", "-severity,-updated_at");
if (page) url.searchParams.append("page[number]", page.toString());
if (pageSize) url.searchParams.append("page[size]", pageSize.toString());
+1 -9
View File
@@ -141,15 +141,7 @@ export const getLatestMetadataInfo = async ({
}
};
interface GetFindingByIdOptions {
source?: "resource-detail-drawer";
}
export const getFindingById = async (
findingId: string,
include = "",
_options?: GetFindingByIdOptions,
) => {
export const getFindingById = async (findingId: string, include = "") => {
const headers = await getAuthHeaders({ contentType: false });
const url = new URL(`${apiBaseUrl}/findings/${findingId}`);
@@ -8,6 +8,10 @@ import { useEffect, useRef, useState } from "react";
import { acceptInvitation } from "@/actions/invitations";
import { Button } from "@/components/shadcn";
import {
INVITATION_ACTION_PARAM,
INVITATION_SIGNUP_ACTION,
} from "@/lib/invitation-routing";
type AcceptState =
| { kind: "no-token" }
@@ -200,7 +204,7 @@ export function AcceptInvitationClient({
className="w-full"
onClick={() => {
router.push(
`/sign-up?invitation_token=${encodeURIComponent(token!)}`,
`/sign-up?invitation_token=${encodeURIComponent(token!)}&${INVITATION_ACTION_PARAM}=${INVITATION_SIGNUP_ACTION}`,
);
}}
>
@@ -4,7 +4,6 @@ import { getLatestFindings } from "@/actions/findings/findings";
import { LighthouseBanner } from "@/components/lighthouse/banner";
import { LinkToFindings } from "@/components/overview";
import { ColumnLatestFindings } from "@/components/overview/new-findings-table/table";
import { CardTitle } from "@/components/shadcn";
import { DataTable } from "@/components/ui/table";
import { createDict } from "@/lib/helper";
import { FindingProps, SearchParamsProps } from "@/types";
@@ -58,23 +57,24 @@ export async function FindingsViewSSR({ searchParams }: FindingsViewSSRProps) {
};
return (
<div className="flex w-full flex-col">
<div className="flex w-full flex-col gap-6">
<LighthouseBanner />
<div className="relative w-full flex-col justify-between md:flex-row">
<div className="flex w-full flex-col items-start gap-2 md:flex-row md:items-center">
<h3 className="text-sm font-bold text-nowrap whitespace-nowrap uppercase">
Latest new failing findings
</h3>
<p className="text-text-neutral-tertiary text-xs whitespace-nowrap">
Showing the latest 10 new failing findings by severity.
</p>
<LinkToFindings />
</div>
</div>
<DataTable
key={`dashboard-findings-${Date.now()}`}
columns={ColumnLatestFindings}
data={(expandedResponse?.data || []) as FindingProps[]}
header={
<div className="flex w-full items-center justify-between gap-4">
<div className="flex flex-col gap-0.5">
<CardTitle>Latest New Failed Findings</CardTitle>
<p className="text-text-neutral-tertiary text-xs">
Showing the latest 10 sorted by severity
</p>
</div>
<LinkToFindings />
</div>
}
/>
</div>
);
@@ -1,7 +1,6 @@
import { Skeleton } from "@heroui/skeleton";
import { Suspense } from "react";
import { SkeletonTableNewFindings } from "@/components/overview/new-findings-table/table";
import { SearchParamsProps } from "@/types";
import { GraphsTabsClient } from "./_components/graphs-tabs-client";
@@ -19,10 +18,6 @@ const LoadingFallback = () => (
</div>
);
const TAB_FALLBACKS: Partial<Record<TabId, React.ReactNode>> = {
findings: <SkeletonTableNewFindings />,
};
type GraphComponent = React.ComponentType<{ searchParams: SearchParamsProps }>;
const GRAPH_COMPONENTS: Record<TabId, GraphComponent> = {
@@ -43,10 +38,9 @@ export const GraphsTabsWrapper = async ({
const tabsContent = Object.fromEntries(
GRAPH_TABS.map((tab) => {
const Component = GRAPH_COMPONENTS[tab.id];
const fallback = TAB_FALLBACKS[tab.id] ?? <LoadingFallback />;
return [
tab.id,
<Suspense key={tab.id} fallback={fallback}>
<Suspense key={tab.id} fallback={<LoadingFallback />}>
<Component searchParams={searchParams} />
</Suspense>,
];
@@ -78,7 +78,7 @@ export default async function ComplianceDetail({
await Promise.all([
getComplianceOverviewMetadataInfo({
filters: {
"filter[scan_id]": selectedScanId ?? undefined,
"filter[scan_id]": selectedScanId,
},
}),
getComplianceAttributes(complianceId),
-16
View File
@@ -1,16 +0,0 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("Compliance overview page", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "page.tsx");
const source = readFileSync(filePath, "utf8");
it("delegates client-side search to ComplianceOverviewGrid", () => {
expect(source).toContain("ComplianceOverviewGrid");
expect(source).not.toContain("filter[search]");
});
});
+84 -83
View File
@@ -1,4 +1,3 @@
import { Info } from "lucide-react";
import { Suspense } from "react";
import {
@@ -8,14 +7,12 @@ import {
import { getThreatScore } from "@/actions/overview";
import { getScans } from "@/actions/scans";
import {
ComplianceCard,
ComplianceSkeletonGrid,
NoScansAvailable,
ThreatScoreBadge,
} from "@/components/compliance";
import { ComplianceFilters } from "@/components/compliance/compliance-header/compliance-filters";
import { ComplianceOverviewGrid } from "@/components/compliance/compliance-overview-grid";
import { Alert, AlertDescription } from "@/components/shadcn/alert";
import { Card, CardContent } from "@/components/shadcn/card/card";
import { ComplianceHeader } from "@/components/compliance/compliance-header/compliance-header";
import { ContentLayout } from "@/components/ui";
import {
ExpandedScanData,
@@ -33,6 +30,12 @@ export default async function Compliance({
const resolvedSearchParams = await searchParams;
const searchParamsKey = JSON.stringify(resolvedSearchParams || {});
const filters = Object.fromEntries(
Object.entries(resolvedSearchParams).filter(([key]) =>
key.startsWith("filter["),
),
);
const scansData = await getScans({
filters: {
"filter[state]": "completed",
@@ -76,12 +79,9 @@ export default async function Compliance({
.filter(Boolean) as ExpandedScanData[];
// Use scanId from URL, or select the first scan if not provided
const scanIdParam = resolvedSearchParams.scanId;
const scanIdFromUrl = Array.isArray(scanIdParam)
? scanIdParam[0]
: scanIdParam;
const selectedScanId: string | null =
scanIdFromUrl || expandedScansData[0]?.id || null;
const selectedScanId =
resolvedSearchParams.scanId || expandedScansData[0]?.id || null;
const query = (filters["filter[search]"] as string) || "";
// Find the selected scan
const selectedScan = expandedScansData.find(
@@ -102,6 +102,7 @@ export default async function Compliance({
// Fetch metadata if we have a selected scan
const metadataInfoData = selectedScanId
? await getComplianceOverviewMetadataInfo({
query,
filters: {
"filter[scan_id]": selectedScanId,
},
@@ -130,39 +131,28 @@ export default async function Compliance({
<ContentLayout title="Compliance" icon="lucide:shield-check">
{selectedScanId ? (
<>
{/* Row 1: Filters */}
<div className="mb-6">
<ComplianceFilters
scans={expandedScansData}
uniqueRegions={uniqueRegions}
selectedScanId={selectedScanId}
/>
<div className="mb-6 flex flex-col gap-6 lg:flex-row lg:items-start lg:justify-between">
<div className="min-w-0 flex-1">
<ComplianceHeader
scans={expandedScansData}
uniqueRegions={uniqueRegions}
/>
</div>
{threatScoreData &&
typeof selectedScanId === "string" &&
selectedScan && (
<div className="w-full lg:w-[360px] lg:flex-shrink-0">
<ThreatScoreBadge
score={threatScoreData.score}
scanId={selectedScanId}
provider={selectedScan.providerInfo.provider}
selectedScan={selectedScanData}
sectionScores={threatScoreData.sectionScores}
/>
</div>
)}
</div>
{/* Row 2: ThreatScore card — full width, horizontal */}
{threatScoreData &&
typeof selectedScanId === "string" &&
selectedScan && (
<div className="mb-6">
<ThreatScoreBadge
score={threatScoreData.score}
scanId={selectedScanId}
provider={selectedScan.providerInfo.provider}
selectedScan={selectedScanData}
sectionScores={threatScoreData.sectionScores}
/>
</div>
)}
{/* Row 3: Compliance grid with client-side search */}
<Suspense
key={searchParamsKey}
fallback={
<ComplianceOverviewPanel>
<ComplianceSkeletonGrid />
</ComplianceOverviewPanel>
}
>
<Suspense key={searchParamsKey} fallback={<ComplianceSkeletonGrid />}>
<SSRComplianceGrid
searchParams={resolvedSearchParams}
selectedScan={selectedScanData}
@@ -186,23 +176,25 @@ const SSRComplianceGrid = async ({
const scanId = searchParams.scanId?.toString() || "";
const regionFilter = searchParams["filter[region__in]"]?.toString() || "";
// Extract all filter parameters
const filters = Object.fromEntries(
Object.entries(searchParams).filter(([key]) => key.startsWith("filter[")),
);
// Extract query from filters
const query = (filters["filter[search]"] as string) || "";
// Only fetch compliance data if we have a valid scanId
const compliancesData =
scanId && scanId.trim() !== ""
? await getCompliancesOverview({
scanId,
region: regionFilter,
query,
})
: { data: [], errors: [] };
const type = compliancesData?.data?.type;
const frameworks = compliancesData?.data
?.filter((compliance: ComplianceOverviewData) => {
return compliance.attributes.framework !== "ProwlerThreatScore";
})
.sort((a: ComplianceOverviewData, b: ComplianceOverviewData) =>
a.attributes.framework.localeCompare(b.attributes.framework),
);
// Check if the response contains no data
if (
@@ -212,49 +204,58 @@ const SSRComplianceGrid = async ({
type === "tasks"
) {
return (
<Alert variant="info">
<Info className="size-4" />
<AlertDescription>
This scan has no compliance data available yet, please select a
different one.
</AlertDescription>
</Alert>
<div className="flex h-full items-center">
<div className="text-default-500 text-sm">
No compliance data available for the selected scan.
</div>
</div>
);
}
// Handle errors returned by the API
if (compliancesData?.errors?.length > 0) {
return (
<Alert variant="info">
<Info className="size-4" />
<AlertDescription>Provide a valid scan ID.</AlertDescription>
</Alert>
<div className="flex h-full items-center">
<div className="text-default-500 text-sm">Provide a valid scan ID.</div>
</div>
);
}
return (
<ComplianceOverviewPanel>
<ComplianceOverviewGrid
frameworks={frameworks}
scanId={scanId}
selectedScan={selectedScan}
/>
</ComplianceOverviewPanel>
);
};
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 2xl:grid-cols-4">
{compliancesData.data
.filter((compliance: ComplianceOverviewData) => {
// Filter out ProwlerThreatScore from the grid
return compliance.attributes.framework !== "ProwlerThreatScore";
})
.sort((a: ComplianceOverviewData, b: ComplianceOverviewData) =>
a.attributes.framework.localeCompare(b.attributes.framework),
)
.map((compliance: ComplianceOverviewData) => {
const { attributes, id } = compliance;
const {
framework,
version,
requirements_passed,
total_requirements,
} = attributes;
const ComplianceOverviewPanel = ({
children,
}: {
children: React.ReactNode;
}) => {
return (
<Card
variant="base"
padding="none"
className="minimal-scrollbar shadow-small relative z-0 w-full gap-4 overflow-auto"
>
<CardContent className="flex flex-col gap-4 p-4">{children}</CardContent>
</Card>
return (
<ComplianceCard
key={id}
title={framework}
version={version}
passingRequirements={requirements_passed}
totalRequirements={total_requirements}
prevPassingRequirements={requirements_passed}
prevTotalRequirements={total_requirements}
scanId={scanId}
complianceId={id}
id={id}
selectedScan={selectedScan}
/>
);
})}
</div>
);
};
+2 -8
View File
@@ -25,10 +25,8 @@ describe("findings page", () => {
expect(source).toContain("resolveFindingScanDateFilters");
});
it("uses resolved filters to choose getFindingGroups for historical queries and getLatestFindingGroups otherwise", () => {
expect(source).toContain("hasHistoricalData");
expect(source).toContain("hasDateOrScanFilter(filtersWithScanDates)");
expect(source).toContain("hasDateOrScanFilter(filters)");
it("uses getLatestFindingGroups for non-date/scan queries and getFindingGroups for historical", () => {
expect(source).toContain("hasDateOrScan");
expect(source).toContain("getFindingGroups");
expect(source).toContain("getLatestFindingGroups");
});
@@ -36,8 +34,4 @@ describe("findings page", () => {
it("guards errors array access with a length check", () => {
expect(source).toContain("errors?.length > 0");
});
it("applies the shared default muted filter so muted findings are hidden unless the caller opts in", () => {
expect(source).toContain("applyDefaultMutedFilter");
});
});
+16 -17
View File
@@ -16,7 +16,6 @@ import {
import { ContentLayout } from "@/components/ui";
import { FilterTransitionWrapper } from "@/contexts";
import {
applyDefaultMutedFilter,
createScanDetailsMapping,
extractFiltersAndQuery,
extractSortAndKey,
@@ -35,26 +34,25 @@ export default async function Findings({
const { encodedSort } = extractSortAndKey(resolvedSearchParams);
const { filters, query } = extractFiltersAndQuery(resolvedSearchParams);
// Check if the searchParams contain any date or scan filter
const hasDateOrScan = hasDateOrScanFilter(resolvedSearchParams);
const [providersData, scansData] = await Promise.all([
getProviders({ pageSize: 50 }),
getScans({ pageSize: 50 }),
]);
const filtersWithScanDates = applyDefaultMutedFilter(
await resolveFindingScanDateFilters({
filters,
scans: scansData?.data || [],
loadScan: async (scanId: string) => {
const response = await getScan(scanId);
return response?.data;
},
}),
);
const hasHistoricalData = hasDateOrScanFilter(filtersWithScanDates);
const filtersWithScanDates = await resolveFindingScanDateFilters({
filters,
scans: scansData?.data || [],
loadScan: async (scanId: string) => {
const response = await getScan(scanId);
return response?.data;
},
});
const metadataInfoData = await (
hasHistoricalData ? getMetadataInfo : getLatestMetadataInfo
hasDateOrScan ? getMetadataInfo : getLatestMetadataInfo
)({
query,
sort: encodedSort,
@@ -121,9 +119,10 @@ const SSRDataTable = async ({
const pageSize = parseInt(searchParams.pageSize?.toString() || "10", 10);
const { encodedSort } = extractSortAndKey(searchParams);
const hasHistoricalData = hasDateOrScanFilter(filters);
// Check if the searchParams contain any date or scan filter
const hasDateOrScan = hasDateOrScanFilter(searchParams);
const fetchFindingGroups = hasHistoricalData
const fetchFindingGroups = hasDateOrScan
? getFindingGroups
: getLatestFindingGroups;
@@ -152,7 +151,7 @@ const SSRDataTable = async ({
data={groups}
metadata={findingGroupsData?.meta}
resolvedFilters={filters}
hasHistoricalData={hasHistoricalData}
hasHistoricalData={hasDateOrScan}
/>
</>
);
@@ -62,7 +62,6 @@ export const ClientAccordionContent = ({
filters: {
"filter[check_id__in]": checkIds.join(","),
"filter[scan]": scanId,
"filter[muted]": "false",
...(region && { "filter[region__in]": region }),
},
page: parseInt(pageNumber, 10),
@@ -1,30 +0,0 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("ComplianceCard", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "compliance-card.tsx");
const source = readFileSync(filePath, "utf8");
it("keeps the shadcn Card base variant", () => {
expect(source).toContain('variant="base"');
});
it("uses a responsive stacked layout for narrow screens", () => {
expect(source).toContain("flex-col");
expect(source).toContain("sm:flex-row");
});
it("uses the shadcn progress component instead of Hero UI", () => {
expect(source).toContain('from "@/components/shadcn/progress"');
expect(source).not.toContain("@heroui/progress");
});
it("places compact actions in the icon column on larger screens", () => {
expect(source).toContain('orientation="column"');
expect(source).toContain('buttonWidth="icon"');
});
});
+54 -77
View File
@@ -1,20 +1,11 @@
"use client";
import { Progress } from "@heroui/progress";
import Image from "next/image";
import { useRouter, useSearchParams } from "next/navigation";
import { Card, CardContent } from "@/components/shadcn/card/card";
import { Progress } from "@/components/shadcn/progress";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn/tooltip";
import { getReportTypeForFramework } from "@/lib/compliance/compliance-report-types";
import {
getScoreIndicatorClass,
type ScoreColorVariant,
} from "@/lib/compliance/score-utils";
import { ScanEntity } from "@/types/scans";
import { getComplianceIcon } from "../icons";
@@ -54,9 +45,13 @@ export const ComplianceCard: React.FC<ComplianceCardProps> = ({
(passingRequirements / totalRequirements) * 100,
);
const getRatingVariant = (value: number): ScoreColorVariant => {
if (value <= 10) return "danger";
if (value <= 40) return "warning";
const getRatingColor = (ratingPercentage: number) => {
if (ratingPercentage <= 10) {
return "danger";
}
if (ratingPercentage <= 40) {
return "warning";
}
return "success";
};
@@ -85,76 +80,58 @@ export const ComplianceCard: React.FC<ComplianceCardProps> = ({
onClick={navigateToDetail}
>
<CardContent className="p-0">
<div className="flex w-full flex-col gap-3 sm:flex-row sm:items-start">
<div className="flex shrink-0 items-center justify-between sm:flex-col sm:items-start sm:gap-2">
{getComplianceIcon(title) && (
<Image
src={getComplianceIcon(title)}
alt={`${title} logo`}
className="h-10 w-10 min-w-10 self-start rounded-md border border-gray-300 bg-white object-contain p-1"
/>
)}
<div
className="shrink-0"
onClick={(e) => e.stopPropagation()}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.stopPropagation();
}
<div className="flex w-full items-center gap-4">
{getComplianceIcon(title) && (
<Image
src={getComplianceIcon(title)}
alt={`${title} logo`}
className="h-10 w-10 min-w-10 rounded-md border border-gray-300 bg-white object-contain p-1"
/>
)}
<div className="flex w-full flex-col">
<h4 className="text-small mb-1 leading-5 font-bold">
{formatTitle(title)}
{version ? ` - ${version}` : ""}
</h4>
<Progress
label="Score:"
size="sm"
aria-label="Compliance score"
value={ratingPercentage}
showValueLabel={true}
classNames={{
track: "drop-shadow-sm border border-default",
label: "tracking-wider font-medium text-default-600 text-xs",
value: "text-foreground/60 -mb-2",
}}
role="group"
tabIndex={0}
>
<ComplianceDownloadContainer
compact
orientation="column"
buttonWidth="icon"
presentation="dropdown"
scanId={scanId}
complianceId={complianceId}
reportType={getReportTypeForFramework(title)}
disabled={hasRegionFilter}
/>
</div>
</div>
<div className="flex w-full min-w-0 flex-col gap-3">
<Tooltip>
<TooltipTrigger asChild>
<h4 className="text-small truncate leading-5 font-bold">
{formatTitle(title)}
{version ? ` - ${version}` : ""}
</h4>
</TooltipTrigger>
<TooltipContent>
{formatTitle(title)}
{version ? ` - ${version}` : ""}
</TooltipContent>
</Tooltip>
<div className="flex flex-col gap-2">
<div className="flex items-center justify-between gap-3 text-xs">
<span className="text-text-neutral-secondary font-medium tracking-wider">
Score:
</span>
<span className="text-text-neutral-secondary">
{ratingPercentage}%
</span>
</div>
<Progress
aria-label="Compliance score"
value={ratingPercentage}
className="border-border-neutral-secondary h-2.5 border drop-shadow-sm"
indicatorClassName={getScoreIndicatorClass(
getRatingVariant(ratingPercentage),
)}
/>
</div>
<div className="flex flex-col gap-3 sm:flex-row sm:items-center sm:justify-between">
<small className="min-w-0">
color={getRatingColor(ratingPercentage)}
/>
<div className="mt-2 flex items-center justify-between">
<small>
<span className="mr-1 text-xs font-semibold">
{passingRequirements} / {totalRequirements}
</span>
Passing Requirements
</small>
<div
onClick={(e) => e.stopPropagation()}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.stopPropagation();
}
}}
role="group"
tabIndex={0}
>
<ComplianceDownloadContainer
compact
scanId={scanId}
complianceId={complianceId}
reportType={getReportTypeForFramework(title)}
disabled={hasRegionFilter}
/>
</div>
</div>
</div>
</div>
@@ -1,133 +0,0 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { beforeEach, describe, expect, it, vi } from "vitest";
const { downloadComplianceCsvMock, downloadComplianceReportPdfMock } =
vi.hoisted(() => ({
downloadComplianceCsvMock: vi.fn(),
downloadComplianceReportPdfMock: vi.fn(),
}));
vi.mock("@/lib/helper", () => ({
downloadComplianceCsv: downloadComplianceCsvMock,
downloadComplianceReportPdf: downloadComplianceReportPdfMock,
}));
vi.mock("@/components/ui", () => ({
toast: {},
}));
import { ComplianceDownloadContainer } from "./compliance-download-container";
describe("ComplianceDownloadContainer", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "compliance-download-container.tsx");
const source = readFileSync(filePath, "utf8");
beforeEach(() => {
vi.clearAllMocks();
});
it("uses the shared action dropdown for the card actions mode", () => {
expect(source).toContain("ActionDropdown");
expect(source).not.toContain("@heroui/button");
});
it("should expose an accessible actions menu trigger", () => {
render(
<ComplianceDownloadContainer
compact
presentation="dropdown"
scanId="scan-1"
complianceId="compliance-1"
reportType="threatscore"
/>,
);
expect(
screen.getByRole("button", { name: "Open compliance export actions" }),
).toBeInTheDocument();
});
it("should support fixed icon-sized dropdown trigger in column mode", () => {
render(
<ComplianceDownloadContainer
compact
presentation="dropdown"
orientation="column"
buttonWidth="icon"
scanId="scan-1"
complianceId="compliance-1"
reportType="threatscore"
/>,
);
const trigger = screen.getByRole("button", {
name: "Open compliance export actions",
});
expect(trigger.className).toContain("border-text-neutral-secondary");
});
it("should open export actions from the compact trigger", async () => {
const user = userEvent.setup();
render(
<ComplianceDownloadContainer
compact
presentation="dropdown"
scanId="scan-1"
complianceId="compliance-1"
reportType="threatscore"
/>,
);
await user.click(
screen.getByRole("button", { name: "Open compliance export actions" }),
);
expect(screen.getByText("Download CSV report")).toBeInTheDocument();
expect(screen.getByText("Download PDF report")).toBeInTheDocument();
});
it("should trigger both downloads from the actions menu", async () => {
const user = userEvent.setup();
render(
<ComplianceDownloadContainer
compact
presentation="dropdown"
scanId="scan-1"
complianceId="compliance-1"
reportType="threatscore"
/>,
);
await user.click(
screen.getByRole("button", { name: "Open compliance export actions" }),
);
await user.click(
screen.getByRole("menuitem", { name: /Download CSV report/i }),
);
await user.click(
screen.getByRole("button", { name: "Open compliance export actions" }),
);
await user.click(
screen.getByRole("menuitem", { name: /Download PDF report/i }),
);
expect(downloadComplianceCsvMock).toHaveBeenCalledWith(
"scan-1",
"compliance-1",
{},
);
expect(downloadComplianceReportPdfMock).toHaveBeenCalledWith(
"scan-1",
"threatscore",
{},
);
});
});
@@ -4,15 +4,6 @@ import { DownloadIcon, FileTextIcon } from "lucide-react";
import { useState } from "react";
import { Button } from "@/components/shadcn/button/button";
import {
ActionDropdown,
ActionDropdownItem,
} from "@/components/shadcn/dropdown";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn/tooltip";
import { toast } from "@/components/ui";
import type { ComplianceReportType } from "@/lib/compliance/compliance-report-types";
import {
@@ -27,9 +18,6 @@ interface ComplianceDownloadContainerProps {
reportType?: ComplianceReportType;
compact?: boolean;
disabled?: boolean;
orientation?: "row" | "column";
buttonWidth?: "auto" | "icon";
presentation?: "buttons" | "dropdown";
}
export const ComplianceDownloadContainer = ({
@@ -38,14 +26,9 @@ export const ComplianceDownloadContainer = ({
reportType,
compact = false,
disabled = false,
orientation = "row",
buttonWidth = "auto",
presentation = "buttons",
}: ComplianceDownloadContainerProps) => {
const [isDownloadingCsv, setIsDownloadingCsv] = useState(false);
const [isDownloadingPdf, setIsDownloadingPdf] = useState(false);
const isIconWidth = buttonWidth === "icon";
const isDropdown = presentation === "dropdown";
const handleDownloadCsv = async () => {
if (isDownloadingCsv) return;
@@ -69,116 +52,40 @@ export const ComplianceDownloadContainer = ({
const buttonClassName = cn(
"border-button-primary text-button-primary hover:bg-button-primary/10",
compact &&
!isIconWidth &&
"h-7 px-2 text-xs sm:w-full sm:justify-center sm:px-2.5",
orientation === "column" && !isIconWidth && "w-full",
isIconWidth && "size-10 rounded-lg p-0",
compact && "h-7 px-2 text-xs",
);
const labelClassName = isIconWidth
? "sr-only"
: compact
? "sr-only sm:not-sr-only"
: undefined;
const showTooltip = compact || isIconWidth;
return (
<div
className={cn(
"flex",
orientation === "column"
? "flex-col items-start"
: compact
? "w-full justify-end sm:w-auto"
: "flex-row",
)}
>
{isDropdown ? (
<ActionDropdown
variant={isIconWidth ? "bordered" : "table"}
ariaLabel="Open compliance export actions"
<div className={cn("flex gap-2", compact ? "items-center" : "flex-col")}>
<Button
size="sm"
variant="outline"
className={buttonClassName}
onClick={handleDownloadCsv}
disabled={disabled || isDownloadingCsv}
aria-label="Download compliance CSV report"
>
<FileTextIcon
size={14}
className={isDownloadingCsv ? "animate-download-icon" : ""}
/>
CSV
</Button>
{reportType && (
<Button
size="sm"
variant="outline"
className={buttonClassName}
onClick={handleDownloadPdf}
disabled={disabled || isDownloadingPdf}
aria-label="Download compliance PDF report"
>
<ActionDropdownItem
icon={
<FileTextIcon
className={isDownloadingCsv ? "animate-download-icon" : ""}
/>
}
label="Download CSV report"
onSelect={handleDownloadCsv}
disabled={disabled || isDownloadingCsv}
<DownloadIcon
size={14}
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
{reportType && (
<ActionDropdownItem
icon={
<DownloadIcon
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
}
label="Download PDF report"
onSelect={handleDownloadPdf}
disabled={disabled || isDownloadingPdf}
/>
)}
</ActionDropdown>
) : (
<div
className={cn(
"flex gap-2",
orientation === "column"
? isIconWidth
? "flex-col items-start"
: "flex-col items-stretch"
: compact
? "w-full flex-wrap items-center justify-end sm:w-auto sm:flex-nowrap"
: "flex-row flex-wrap items-center",
)}
>
<Tooltip>
<TooltipTrigger asChild>
<Button
size="sm"
variant="outline"
className={buttonClassName}
onClick={handleDownloadCsv}
disabled={disabled || isDownloadingCsv}
aria-label="Download compliance CSV report"
>
<FileTextIcon
size={14}
className={isDownloadingCsv ? "animate-download-icon" : ""}
/>
<span className={labelClassName}>CSV</span>
</Button>
</TooltipTrigger>
{showTooltip && (
<TooltipContent>Download CSV report</TooltipContent>
)}
</Tooltip>
{reportType && (
<Tooltip>
<TooltipTrigger asChild>
<Button
size="sm"
variant="outline"
className={buttonClassName}
onClick={handleDownloadPdf}
disabled={disabled || isDownloadingPdf}
aria-label="Download compliance PDF report"
>
<DownloadIcon
size={14}
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
<span className={labelClassName}>PDF</span>
</Button>
</TooltipTrigger>
{showTooltip && (
<TooltipContent>Download PDF report</TooltipContent>
)}
</Tooltip>
)}
</div>
PDF
</Button>
)}
</div>
);
@@ -1,76 +0,0 @@
"use client";
import { useRouter, useSearchParams } from "next/navigation";
import { ClearFiltersButton } from "@/components/filters/clear-filters-button";
import {
MultiSelect,
MultiSelectContent,
MultiSelectItem,
MultiSelectSelectAll,
MultiSelectSeparator,
MultiSelectTrigger,
MultiSelectValue,
} from "@/components/shadcn/select/multiselect";
import { useUrlFilters } from "@/hooks/use-url-filters";
import { ScanSelector, SelectScanComplianceDataProps } from "./scan-selector";
interface ComplianceFiltersProps {
scans: SelectScanComplianceDataProps["scans"];
uniqueRegions: string[];
selectedScanId: string;
}
export const ComplianceFilters = ({
scans,
uniqueRegions,
selectedScanId,
}: ComplianceFiltersProps) => {
const router = useRouter();
const searchParams = useSearchParams();
const { updateFilter } = useUrlFilters();
const handleScanChange = (selectedKey: string) => {
const params = new URLSearchParams(searchParams);
params.set("scanId", selectedKey);
router.push(`?${params.toString()}`, { scroll: false });
};
const regionValues =
searchParams.get("filter[region__in]")?.split(",").filter(Boolean) ?? [];
return (
<div className="flex max-w-4xl flex-wrap items-center gap-4">
<div className="w-full sm:max-w-[380px] sm:min-w-[200px] sm:flex-1">
<ScanSelector
scans={scans}
selectedScanId={selectedScanId}
onSelectionChange={handleScanChange}
/>
</div>
{uniqueRegions.length > 0 && (
<div className="w-full sm:max-w-[280px] sm:min-w-[200px] sm:flex-1">
<MultiSelect
values={regionValues}
onValuesChange={(values) => updateFilter("region__in", values)}
>
<MultiSelectTrigger size="default">
<MultiSelectValue placeholder="All Regions" />
</MultiSelectTrigger>
<MultiSelectContent search={false} width="wide">
<MultiSelectSelectAll>Select All</MultiSelectSelectAll>
<MultiSelectSeparator />
{uniqueRegions.map((region) => (
<MultiSelectItem key={region} value={region}>
{region}
</MultiSelectItem>
))}
</MultiSelectContent>
</MultiSelect>
</div>
)}
<ClearFiltersButton showCount />
</div>
);
};
@@ -1,18 +0,0 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("ComplianceHeader", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "compliance-header.tsx");
const source = readFileSync(filePath, "utf8");
it("renders the scan selector inside the shared filters grid using default layout", () => {
expect(source).toContain("prependElement");
expect(source).toContain("<DataCompliance");
expect(source).toContain("DataTableFilterCustom");
expect(source).not.toContain("gridClassName");
});
});
@@ -35,9 +35,6 @@ export const ComplianceHeader = ({
selectedScan,
}: ComplianceHeaderProps) => {
const frameworkFilters = [];
const prependElement = showProviders ? (
<DataCompliance scans={scans} className="w-full sm:col-span-2" />
) : undefined;
// Add CIS Profile Level filter if framework is CIS
if (framework === "CIS") {
@@ -45,7 +42,6 @@ export const ComplianceHeader = ({
key: "cis_profile_level",
labelCheckboxGroup: "Level",
values: ["Level 1", "Level 2"],
width: "wide" as const,
index: 0, // Show first
showSelectAll: false, // No "Select All" option since Level 2 includes Level 1
defaultValues: ["Level 2"], // Default to Level 2 selected (which includes Level 1)
@@ -59,7 +55,6 @@ export const ComplianceHeader = ({
key: "region__in",
labelCheckboxGroup: "Regions",
values: uniqueRegions,
width: "wide" as const,
index: 1, // Show after framework filters
},
]
@@ -82,11 +77,9 @@ export const ComplianceHeader = ({
{selectedScan && <ComplianceScanInfo scan={selectedScan} />}
{/* Showed in the compliance page */}
{!hideFilters && (allFilters.length > 0 || showProviders) && (
<DataTableFilterCustom
filters={allFilters}
prependElement={prependElement}
/>
{showProviders && <DataCompliance scans={scans} />}
{!hideFilters && allFilters.length > 0 && (
<DataTableFilterCustom filters={allFilters} />
)}
</div>
{logoPath && complianceTitle && (
@@ -7,13 +7,11 @@ import {
ScanSelector,
SelectScanComplianceDataProps,
} from "@/components/compliance/compliance-header/index";
import { cn } from "@/lib/utils";
interface DataComplianceProps {
scans: SelectScanComplianceDataProps["scans"];
className?: string;
}
export const DataCompliance = ({ scans, className }: DataComplianceProps) => {
export const DataCompliance = ({ scans }: DataComplianceProps) => {
const router = useRouter();
const searchParams = useSearchParams();
@@ -38,7 +36,7 @@ export const DataCompliance = ({ scans, className }: DataComplianceProps) => {
};
return (
<div className={cn("w-full", className)}>
<div className="flex max-w-fit">
<ScanSelector
scans={scans}
selectedScanId={selectedScanId}
@@ -1,3 +1,2 @@
export * from "./compliance-filters";
export * from "./data-compliance";
export * from "./scan-selector";
@@ -1,6 +1,5 @@
"use client";
import { Badge } from "@/components/shadcn/badge/badge";
import {
Select,
SelectContent,
@@ -8,7 +7,6 @@ import {
SelectTrigger,
SelectValue,
} from "@/components/shadcn/select/select";
import { getScanEntityLabel } from "@/lib/helper-filters";
import { ProviderType, ScanProps } from "@/types";
import { ComplianceScanInfo } from "./compliance-scan-info";
@@ -31,7 +29,6 @@ export const ScanSelector = ({
onSelectionChange,
}: SelectScanComplianceDataProps) => {
const selectedScan = scans.find((item) => item.id === selectedScanId);
const triggerLabel = selectedScan ? getScanEntityLabel(selectedScan) : "";
return (
<Select
@@ -42,28 +39,21 @@ export const ScanSelector = ({
}
}}
>
<SelectTrigger className="w-full">
<SelectTrigger className="w-full max-w-[360px]">
<SelectValue placeholder="Select a scan">
{selectedScan ? (
<>
<span className="text-text-neutral-secondary shrink-0 text-xs">
Scan:
</span>
<Badge variant="tag" className="truncate">
{triggerLabel}
</Badge>
</>
<ComplianceScanInfo scan={selectedScan} />
) : (
"Select a scan"
)}
</SelectValue>
</SelectTrigger>
<SelectContent>
<SelectContent className="max-w-[360px]">
{scans.map((scan) => (
<SelectItem
key={scan.id}
value={scan.id}
className="data-[state=checked]:bg-bg-neutral-tertiary [&_svg:not([class*='size-'])]:size-6"
className="data-[state=checked]:bg-bg-neutral-tertiary"
>
<ComplianceScanInfo scan={scan} />
</SelectItem>
@@ -1,70 +0,0 @@
"use client";
import { useState } from "react";
import { ComplianceCard } from "@/components/compliance/compliance-card";
import { DataTableSearch } from "@/components/ui/table/data-table-search";
import type { ComplianceOverviewData } from "@/types/compliance";
import type { ScanEntity } from "@/types/scans";
interface ComplianceOverviewGridProps {
frameworks: ComplianceOverviewData[];
scanId: string;
selectedScan?: ScanEntity;
}
export const ComplianceOverviewGrid = ({
frameworks,
scanId,
selectedScan,
}: ComplianceOverviewGridProps) => {
const [searchTerm, setSearchTerm] = useState("");
const filteredFrameworks = frameworks.filter((compliance) =>
compliance.attributes.framework
.toLowerCase()
.includes(searchTerm.toLowerCase()),
);
return (
<>
<div className="flex items-center justify-between gap-4">
<DataTableSearch
controlledValue={searchTerm}
onSearchChange={setSearchTerm}
placeholder="Search frameworks..."
/>
<span className="text-text-neutral-secondary shrink-0 text-sm">
{filteredFrameworks.length.toLocaleString()} Total Entries
</span>
</div>
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 2xl:grid-cols-4">
{filteredFrameworks.map((compliance) => {
const { attributes, id } = compliance;
const {
framework,
version,
requirements_passed,
total_requirements,
} = attributes;
return (
<ComplianceCard
key={id}
title={framework}
version={version}
passingRequirements={requirements_passed}
totalRequirements={total_requirements}
prevPassingRequirements={requirements_passed}
prevTotalRequirements={total_requirements}
scanId={scanId}
complianceId={id}
id={id}
selectedScan={selectedScan}
/>
);
})}
</div>
</>
);
};
-2
View File
@@ -13,12 +13,10 @@ export * from "./compliance-custom-details/cis-details";
export * from "./compliance-custom-details/ens-details";
export * from "./compliance-custom-details/iso-details";
export * from "./compliance-download-container";
export * from "./compliance-header/compliance-filters";
export * from "./compliance-header/compliance-header";
export * from "./compliance-header/compliance-scan-info";
export * from "./compliance-header/data-compliance";
export * from "./compliance-header/scan-selector";
export * from "./compliance-overview-grid";
export * from "./no-scans-available";
export * from "./skeletons/bar-chart-skeleton";
export * from "./skeletons/compliance-accordion-skeleton";
@@ -1,17 +0,0 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("ComplianceSkeletonGrid", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "compliance-grid-skeleton.tsx");
const source = readFileSync(filePath, "utf8");
it("uses shadcn skeletons instead of Hero UI", () => {
expect(source).toContain('from "@/components/shadcn/skeleton/skeleton"');
expect(source).not.toContain("@heroui/card");
expect(source).not.toContain("@heroui/skeleton");
});
});
@@ -1,11 +1,19 @@
import { Skeleton } from "@/components/shadcn/skeleton/skeleton";
import { Card } from "@heroui/card";
import { Skeleton } from "@heroui/skeleton";
import React from "react";
export const ComplianceSkeletonGrid = () => {
return (
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 2xl:grid-cols-4">
{[...Array(28)].map((_, index) => (
<Skeleton key={index} className="h-28 rounded-xl" />
))}
</div>
<Card className="h-fit w-full p-4">
<div className="3xl:grid-cols-4 grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-3">
{[...Array(28)].map((_, index) => (
<div key={index} className="flex flex-col gap-4">
<Skeleton className="h-28 rounded-lg">
<div className="bg-default-300 h-full"></div>
</Skeleton>
</div>
))}
</div>
</Card>
);
};
@@ -1,32 +0,0 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("ThreatScoreBadge", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "threatscore-badge.tsx");
const source = readFileSync(filePath, "utf8");
it("uses shadcn card and progress components instead of Hero UI", () => {
expect(source).toContain('from "@/components/shadcn/card/card"');
expect(source).toContain('from "@/components/shadcn/progress"');
expect(source).not.toContain("@heroui/card");
expect(source).not.toContain("@heroui/progress");
});
it("uses ActionDropdown for downloads instead of ComplianceDownloadContainer", () => {
expect(source).toContain("ActionDropdown");
expect(source).toContain("ActionDropdownItem");
expect(source).toContain("downloadComplianceCsv");
expect(source).toContain("downloadComplianceReportPdf");
expect(source).not.toContain("ComplianceDownloadContainer");
});
it("does not use Collapsible components", () => {
expect(source).not.toContain("Collapsible");
expect(source).not.toContain("CollapsibleTrigger");
expect(source).not.toContain("CollapsibleContent");
});
});
+109 -80
View File
@@ -1,24 +1,27 @@
"use client";
import { DownloadIcon, FileTextIcon } from "lucide-react";
import { Card, CardBody } from "@heroui/card";
import { Progress } from "@heroui/progress";
import {
ChevronDown,
ChevronUp,
DownloadIcon,
FileTextIcon,
} from "lucide-react";
import { useRouter, useSearchParams } from "next/navigation";
import { useState } from "react";
import type { SectionScores } from "@/actions/overview/threat-score";
import { ThreatScoreLogo } from "@/components/compliance/threatscore-logo";
import { Card, CardContent } from "@/components/shadcn/card/card";
import { Button } from "@/components/shadcn/button/button";
import {
ActionDropdown,
ActionDropdownItem,
} from "@/components/shadcn/dropdown";
import { Progress } from "@/components/shadcn/progress";
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "@/components/shadcn/collapsible";
import { toast } from "@/components/ui";
import { COMPLIANCE_REPORT_TYPES } from "@/lib/compliance/compliance-report-types";
import {
getScoreColor,
getScoreIndicatorClass,
getScoreTextClass,
} from "@/lib/compliance/score-utils";
import { getScoreColor, getScoreTextClass } from "@/lib/compliance/score-utils";
import {
downloadComplianceCsv,
downloadComplianceReportPdf,
@@ -41,8 +44,9 @@ export const ThreatScoreBadge = ({
}: ThreatScoreBadgeProps) => {
const router = useRouter();
const searchParams = useSearchParams();
const [isDownloadingCsv, setIsDownloadingCsv] = useState(false);
const [isDownloadingPdf, setIsDownloadingPdf] = useState(false);
const [isDownloadingCsv, setIsDownloadingCsv] = useState(false);
const [isExpanded, setIsExpanded] = useState(false);
const complianceId = `prowler_threatscore_${provider.toLowerCase()}`;
@@ -65,18 +69,7 @@ export const ThreatScoreBadge = ({
router.push(`${path}?${params.toString()}`);
};
const handleDownloadCsv = async () => {
if (isDownloadingCsv) return;
setIsDownloadingCsv(true);
try {
await downloadComplianceCsv(scanId, complianceId, toast);
} finally {
setIsDownloadingCsv(false);
}
};
const handleDownloadPdf = async () => {
if (isDownloadingPdf) return;
setIsDownloadingPdf(true);
try {
await downloadComplianceReportPdf(
@@ -89,12 +82,23 @@ export const ThreatScoreBadge = ({
}
};
const handleDownloadCsv = async () => {
setIsDownloadingCsv(true);
try {
await downloadComplianceCsv(scanId, complianceId, toast);
} finally {
setIsDownloadingCsv(false);
}
};
return (
<Card variant="base" padding="md" className="relative gap-4">
<CardContent className="flex flex-col gap-4 p-0 pr-14 lg:flex-row lg:items-start lg:gap-6">
{/* Clickable ThreatScore button */}
<Card
shadow="sm"
className="border-default-200 h-full border bg-transparent"
>
<CardBody className="flex flex-row flex-wrap items-center justify-between gap-3 p-4 lg:flex-col lg:items-stretch lg:justify-start">
<button
className="border-border-neutral-secondary bg-bg-neutral-tertiary hover:border-border-neutral-primary hover:bg-bg-neutral-secondary flex shrink-0 cursor-pointer flex-row items-center justify-between gap-4 rounded-xl border p-3 pr-12 text-left transition-colors lg:pr-3"
className="border-default-200 hover:border-default-300 hover:bg-default-50/50 flex w-full cursor-pointer flex-row items-center justify-between gap-4 rounded-lg border bg-transparent p-3 transition-all"
onClick={handleCardClick}
type="button"
>
@@ -107,67 +111,92 @@ export const ThreatScoreBadge = ({
<Progress
aria-label="ThreatScore progress"
value={score}
className="border-border-neutral-secondary h-2.5 w-24 border"
indicatorClassName={getScoreIndicatorClass(getScoreColor(score))}
color={getScoreColor(score)}
size="sm"
className="w-24"
/>
</div>
</button>
{/* Pillar breakdown — always visible */}
{sectionScores && Object.keys(sectionScores).length > 0 && (
<div className="border-border-neutral-secondary flex-1 space-y-2 border-t pt-3 lg:border-t-0 lg:border-l lg:pt-0 lg:pl-6">
{Object.entries(sectionScores)
.sort(([, a], [, b]) => a - b)
.map(([section, sectionScore]) => (
<div key={section} className="flex items-center gap-2 text-xs">
<span className="text-text-neutral-secondary w-1/3 min-w-0 shrink-0 truncate lg:w-1/4">
{section}
</span>
<Progress
aria-label={`${section} score`}
value={sectionScore}
className="border-border-neutral-secondary h-2 min-w-16 flex-1 border"
indicatorClassName={getScoreIndicatorClass(
getScoreColor(sectionScore),
)}
/>
<span
className={`w-12 shrink-0 text-right font-medium ${getScoreTextClass(sectionScore)}`}
<Collapsible
open={isExpanded}
onOpenChange={setIsExpanded}
className="w-full"
>
<CollapsibleTrigger
aria-label={
isExpanded ? "Hide pillar breakdown" : "Show pillar breakdown"
}
className="text-default-500 hover:text-default-700 flex w-auto items-center justify-center gap-1 py-1 text-xs transition-colors lg:w-full"
>
{isExpanded ? (
<>
<ChevronUp size={14} />
Hide pillar breakdown
</>
) : (
<>
<ChevronDown size={14} />
Show pillar breakdown
</>
)}
</CollapsibleTrigger>
<CollapsibleContent className="border-default-200 mt-2 w-full space-y-2 border-t pt-2">
{Object.entries(sectionScores)
.sort(([, a], [, b]) => a - b)
.map(([section, sectionScore]) => (
<div
key={section}
className="flex items-center gap-2 text-xs"
>
{sectionScore.toFixed(1)}%
</span>
</div>
))}
</div>
<span className="text-default-600 w-1/3 min-w-0 shrink-0 truncate">
{section}
</span>
<Progress
aria-label={`${section} score`}
value={sectionScore}
color={getScoreColor(sectionScore)}
size="sm"
className="min-w-16 flex-1"
/>
<span
className={`w-12 shrink-0 text-right font-medium ${getScoreTextClass(sectionScore)}`}
>
{sectionScore.toFixed(1)}%
</span>
</div>
))}
</CollapsibleContent>
</Collapsible>
)}
</CardContent>
{/* ActionDropdown for downloads — top-right */}
<div className="absolute top-3 right-4">
<ActionDropdown
variant="bordered"
ariaLabel="Open compliance export actions"
>
<ActionDropdownItem
icon={
<FileTextIcon
className={isDownloadingCsv ? "animate-download-icon" : ""}
/>
}
label="Download CSV report"
onSelect={handleDownloadCsv}
/>
<ActionDropdownItem
icon={
<DownloadIcon
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
}
label="Download PDF report"
onSelect={handleDownloadPdf}
/>
</ActionDropdown>
</div>
<div className="flex gap-2">
<Button
size="sm"
variant="outline"
className="flex-1"
onClick={handleDownloadPdf}
disabled={isDownloadingPdf || isDownloadingCsv}
>
<DownloadIcon
size={14}
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
PDF
</Button>
<Button
size="sm"
variant="outline"
className="flex-1"
onClick={handleDownloadCsv}
disabled={isDownloadingCsv || isDownloadingPdf}
>
<FileTextIcon size={14} />
CSV
</Button>
</div>
</CardBody>
</Card>
);
};
+5 -3
View File
@@ -12,7 +12,6 @@ import {
PopoverTrigger,
} from "@/components/shadcn/popover";
import { useUrlFilters } from "@/hooks/use-url-filters";
import { toLocalDateString } from "@/lib/date-utils";
import { cn } from "@/lib/utils";
/** Batch mode: caller controls both the pending date value and the notification callback (all-or-nothing). */
@@ -68,14 +67,17 @@ export const CustomDatePicker = ({
const applyDateFilter = (selectedDate: Date | undefined) => {
if (onBatchChange) {
// Batch mode: notify caller instead of updating URL
onBatchChange("inserted_at", toLocalDateString(selectedDate) ?? "");
onBatchChange(
"inserted_at",
selectedDate ? format(selectedDate, "yyyy-MM-dd") : "",
);
return;
}
// Instant mode (default): push to URL immediately
if (selectedDate) {
// Format as YYYY-MM-DD for the API
updateFilter("inserted_at", toLocalDateString(selectedDate) ?? "");
updateFilter("inserted_at", format(selectedDate, "yyyy-MM-dd"));
} else {
updateFilter("inserted_at", null);
}
+45 -9
View File
@@ -20,13 +20,10 @@ import { DataTableFilterCustom } from "@/components/ui/table";
import { useFilterBatch } from "@/hooks/use-filter-batch";
import { getCategoryLabel, getGroupLabel } from "@/lib/categories";
import { FilterType, ScanEntity } from "@/types";
import { DATA_TABLE_FILTER_MODE } from "@/types/filters";
import { DATA_TABLE_FILTER_MODE, FilterParam } from "@/types/filters";
import { ProviderProps } from "@/types/providers";
import {
buildFindingsFilterChips,
getFindingsFilterDisplayValue,
} from "./findings-filters.utils";
import { getFindingsFilterDisplayValue } from "./findings-filters.utils";
interface FindingsFiltersProps {
/** Provider data for ProviderTypeSelector and AccountsSelector */
@@ -40,6 +37,30 @@ interface FindingsFiltersProps {
uniqueGroups: string[];
}
/**
* Maps raw filter param keys (e.g. "filter[severity__in]") to human-readable labels.
* Used to render chips in the FilterSummaryStrip.
 * Typed as Record<FilterParam, string> so TypeScript enforces exhaustiveness: any
 * addition to FilterParam will cause a compile error here if the label is missing.
*/
const FILTER_KEY_LABELS: Record<FilterParam, string> = {
"filter[provider_type__in]": "Provider",
"filter[provider_id__in]": "Account",
"filter[severity__in]": "Severity",
"filter[status__in]": "Status",
"filter[delta__in]": "Delta",
"filter[region__in]": "Region",
"filter[service__in]": "Service",
"filter[resource_type__in]": "Resource Type",
"filter[category__in]": "Category",
"filter[resource_groups__in]": "Resource Group",
"filter[scan__in]": "Scan",
"filter[scan_id]": "Scan",
"filter[scan_id__in]": "Scan",
"filter[inserted_at]": "Date",
"filter[muted]": "Muted",
};
export const FindingsFilters = ({
providers,
completedScanIds,
@@ -111,7 +132,6 @@ export const FindingsFilters = ({
key: FilterType.SCAN,
labelCheckboxGroup: "Scan ID",
values: completedScanIds,
width: "wide" as const,
valueLabelMapping: scanDetails,
labelFormatter: (value: string) =>
getFindingsFilterDisplayValue(`filter[${FilterType.SCAN}]`, value, {
@@ -124,9 +144,25 @@ export const FindingsFilters = ({
const hasCustomFilters = customFilters.length > 0;
const filterChips: FilterChip[] = buildFindingsFilterChips(pendingFilters, {
providers,
scans: scanDetails,
// Build FilterChip[] from pendingFilters — one chip per individual value, not per key.
// Skip filter[muted]="false" — it is the silent default and should not appear as a chip.
const filterChips: FilterChip[] = [];
Object.entries(pendingFilters).forEach(([key, values]) => {
if (!values || values.length === 0) return;
const label = FILTER_KEY_LABELS[key as FilterParam] ?? key;
values.forEach((value) => {
// Do not show a chip for the default muted=false state
if (key === "filter[muted]" && value === "false") return;
filterChips.push({
key,
label,
value,
displayValue: getFindingsFilterDisplayValue(key, value, {
providers,
scans: scanDetails,
}),
});
});
});
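As an illustration (hypothetical values, not taken from the component), a pending map like the following yields one chip per value and suppresses the default muted state:

const examplePending: Record<string, string[]> = {
  "filter[status__in]": ["FAIL", "PASS"],
  "filter[muted]": ["false"],
};
// Running the loop above over examplePending produces two chips, both labeled
// "Status" (one per status value); no chip is created for muted=false.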
// Handler for removing a single chip: update the pending filter to remove that value.
@@ -3,10 +3,7 @@ import { describe, expect, it } from "vitest";
import { ProviderProps } from "@/types/providers";
import { ScanEntity } from "@/types/scans";
import {
buildFindingsFilterChips,
getFindingsFilterDisplayValue,
} from "./findings-filters.utils";
import { getFindingsFilterDisplayValue } from "./findings-filters.utils";
function makeProvider(
overrides: Partial<ProviderProps> & { id: string },
@@ -101,7 +98,7 @@ describe("getFindingsFilterDisplayValue", () => {
it("shows the resolved scan badge label for scan filters instead of formatting the raw scan id", () => {
expect(
getFindingsFilterDisplayValue("filter[scan__in]", "scan-1", { scans }),
).toBe("AWS - Nightly scan");
).toBe("Nightly scan");
});
it("normalizes finding statuses for display", () => {
@@ -122,17 +119,7 @@ describe("getFindingsFilterDisplayValue", () => {
);
});
it("formats the singular delta filter the same as delta__in", () => {
// The API registers the filter as `filter[delta]` (exact), not `delta__in`.
// Both shapes must resolve to the same human label so chips don't show
// the raw "new" going through formatLabel ("NEW" via the 3-letter acronym heuristic).
expect(getFindingsFilterDisplayValue("filter[delta]", "new")).toBe("New");
expect(getFindingsFilterDisplayValue("filter[delta]", "changed")).toBe(
"Changed",
);
});
it("uses the provider display name regardless of account alias/uid", () => {
it("falls back to the scan provider uid when the alias is missing", () => {
expect(
getFindingsFilterDisplayValue("filter[scan__in]", "scan-2", {
scans: [
@@ -146,17 +133,17 @@ describe("getFindingsFilterDisplayValue", () => {
}),
],
}),
).toBe("AWS - Weekly scan");
).toBe("Weekly scan");
});
it("returns only the provider name when the scan name is missing", () => {
it("falls back to the provider alias when the scan name is missing", () => {
expect(
getFindingsFilterDisplayValue("filter[scan__in]", "scan-3", {
scans: [
...scans,
makeScanMap("scan-3", {
providerInfo: {
provider: "gcp",
provider: "aws",
alias: "Fallback Account",
uid: "333333333333",
},
@@ -167,7 +154,7 @@ describe("getFindingsFilterDisplayValue", () => {
}),
],
}),
).toBe("Google Cloud");
).toBe("Fallback Account");
});
it("keeps the raw scan value when the scan cannot be resolved", () => {
@@ -198,85 +185,3 @@ describe("getFindingsFilterDisplayValue", () => {
).toBe("2026-04-07");
});
});
describe("buildFindingsFilterChips", () => {
it("creates one chip per value with normalized labels", () => {
// Given — this is the exact pending state derived from the LinkToFindings URL:
// /findings?sort=...&filter[status__in]=FAIL&filter[delta]=new
const pendingFilters = {
"filter[status__in]": ["FAIL"],
"filter[delta]": ["new"],
};
// When
const chips = buildFindingsFilterChips(pendingFilters);
// Then — both chips must appear; the delta chip must use "Delta" as label
// (not the raw "filter[delta]") and "New" as displayValue (not "NEW" via
// the short-word acronym heuristic in formatLabel).
expect(chips).toEqual([
{
key: "filter[status__in]",
label: "Status",
value: "FAIL",
displayValue: "Fail",
},
{
key: "filter[delta]",
label: "Delta",
value: "new",
displayValue: "New",
},
]);
});
it("treats filter[delta] and filter[delta__in] identically", () => {
// Given
const chipsSingular = buildFindingsFilterChips({
"filter[delta]": ["new", "changed"],
});
const chipsPlural = buildFindingsFilterChips({
"filter[delta__in]": ["new", "changed"],
});
// Then — both shapes produce the same human labels and display values
expect(
chipsSingular.map((c) => ({ label: c.label, v: c.displayValue })),
).toEqual([
{ label: "Delta", v: "New" },
{ label: "Delta", v: "Changed" },
]);
expect(
chipsPlural.map((c) => ({ label: c.label, v: c.displayValue })),
).toEqual([
{ label: "Delta", v: "New" },
{ label: "Delta", v: "Changed" },
]);
});
it("skips the silent default filter[muted]=false", () => {
const chips = buildFindingsFilterChips({
"filter[muted]": ["false"],
"filter[delta]": ["new"],
});
// Only the delta chip — the default muted=false should not surface
expect(chips).toHaveLength(1);
expect(chips[0].key).toBe("filter[delta]");
});
it("surfaces unmapped keys using the raw key as label (fallback)", () => {
const chips = buildFindingsFilterChips({
"filter[unknown_future_key]": ["value"],
});
expect(chips).toEqual([
{
key: "filter[unknown_future_key]",
label: "filter[unknown_future_key]",
value: "value",
displayValue: "Value",
},
]);
});
});
@@ -1,8 +1,5 @@
import type { FilterChip } from "@/components/filters/filter-summary-strip";
import { formatLabel, getCategoryLabel, getGroupLabel } from "@/lib/categories";
import { getScanEntityLabel } from "@/lib/helper-filters";
import { FINDING_STATUS_DISPLAY_NAMES } from "@/types";
import { FilterParam } from "@/types/filters";
import { getProviderDisplayName, ProviderProps } from "@/types/providers";
import { ScanEntity } from "@/types/scans";
import { SEVERITY_DISPLAY_NAMES } from "@/types/severities";
@@ -38,7 +35,12 @@ function getScanDisplayValue(
return scanId;
}
return getScanEntityLabel(scan) || scanId;
return (
scan.attributes.name ||
scan.providerInfo.alias ||
scan.providerInfo.uid ||
scanId
);
}
export function getFindingsFilterDisplayValue(
@@ -53,7 +55,7 @@ export function getFindingsFilterDisplayValue(
if (filterKey === "filter[provider_id__in]") {
return getProviderAccountDisplayValue(value, options.providers || []);
}
if (filterKey === "filter[scan__in]" || filterKey === "filter[scan]") {
if (filterKey === "filter[scan__in]") {
return getScanDisplayValue(value, options.scans || []);
}
if (filterKey === "filter[severity__in]") {
@@ -70,7 +72,7 @@ export function getFindingsFilterDisplayValue(
] ?? formatLabel(value)
);
}
if (filterKey === "filter[delta__in]" || filterKey === "filter[delta]") {
if (filterKey === "filter[delta__in]") {
return (
FINDING_DELTA_DISPLAY_NAMES[value.toLowerCase()] ?? formatLabel(value)
);
@@ -91,67 +93,3 @@ export function getFindingsFilterDisplayValue(
return formatLabel(value);
}
/**
* Maps raw filter param keys (e.g. "filter[severity__in]") to human-readable labels.
* Used to render chips in the FilterSummaryStrip.
* Typed as Record<FilterParam, string> so TypeScript enforces exhaustiveness: any
* addition to FilterParam will cause a compile error here if the label is missing.
*/
export const FILTER_KEY_LABELS: Record<FilterParam, string> = {
"filter[provider_type__in]": "Provider",
"filter[provider_id__in]": "Account",
"filter[severity__in]": "Severity",
"filter[status__in]": "Status",
"filter[delta__in]": "Delta",
"filter[delta]": "Delta",
"filter[region__in]": "Region",
"filter[service__in]": "Service",
"filter[resource_type__in]": "Resource Type",
"filter[category__in]": "Category",
"filter[resource_groups__in]": "Resource Group",
"filter[scan]": "Scan",
"filter[scan__in]": "Scan",
"filter[scan_id]": "Scan",
"filter[scan_id__in]": "Scan",
"filter[inserted_at]": "Date",
"filter[muted]": "Muted",
};
interface BuildFindingsFilterChipsOptions {
providers?: ProviderProps[];
scans?: Array<{ [scanId: string]: ScanEntity }>;
}
/**
* Builds the chips displayed in the FilterSummaryStrip from a pendingFilters map.
*
* - One chip per individual value (not one per key), so a multi-select filter
* produces multiple chips.
* - Silently skips the default `filter[muted]=false` so it doesn't appear as a
* user-applied filter.
* - Falls back to the raw key as label for unmapped keys, so an unexpected
* param still surfaces instead of disappearing.
*/
export function buildFindingsFilterChips(
pendingFilters: Record<string, string[]>,
options: BuildFindingsFilterChipsOptions = {},
): FilterChip[] {
const chips: FilterChip[] = [];
Object.entries(pendingFilters).forEach(([key, values]) => {
if (!values || values.length === 0) return;
const label = FILTER_KEY_LABELS[key as FilterParam] ?? key;
values.forEach((value) => {
if (key === "filter[muted]" && value === "false") return;
chips.push({
key,
label,
value,
displayValue: getFindingsFilterDisplayValue(key, value, options),
});
});
});
return chips;
}
@@ -78,18 +78,6 @@ vi.mock("./notification-indicator", () => ({
},
}));
vi.mock("@/components/shadcn/tooltip", () => ({
Tooltip: ({ children }: { children: ReactNode }) => <>{children}</>,
TooltipContent: ({ children }: { children: ReactNode }) => <>{children}</>,
TooltipTrigger: ({ children }: { children: ReactNode }) => <>{children}</>,
}));
vi.mock("./provider-icon-cell", () => ({
ProviderIconCell: ({ provider }: { provider: string }) => (
<span data-testid={`provider-icon-${provider}`}>{provider}</span>
),
}));
// ---------------------------------------------------------------------------
// Import after mocks
// ---------------------------------------------------------------------------
@@ -160,26 +148,6 @@ function renderFindingCell(
render(<div>{CellComponent({ row: { original: group } })}</div>);
}
function renderFindingGroupTitleCell(overrides?: Partial<FindingGroupRow>) {
const columns = getColumnFindingGroups({
rowSelection: {},
selectableRowCount: 1,
onDrillDown: vi.fn(),
});
const findingColumn = columns.find(
(col) => (col as { accessorKey?: string }).accessorKey === "finding",
);
if (!findingColumn?.cell) throw new Error("finding column not found");
const group = makeGroup(overrides);
const CellComponent = findingColumn.cell as (props: {
row: { original: FindingGroupRow };
}) => ReactNode;
render(<div>{CellComponent({ row: { original: group } })}</div>);
}
function renderImpactedResourcesCell(overrides?: Partial<FindingGroupRow>) {
const columns = getColumnFindingGroups({
rowSelection: {},
@@ -203,13 +171,11 @@ function renderImpactedResourcesCell(overrides?: Partial<FindingGroupRow>) {
}
function renderSelectCell(overrides?: Partial<FindingGroupRow>) {
const onDrillDown =
vi.fn<(checkId: string, group: FindingGroupRow) => void>();
const toggleSelected = vi.fn();
const columns = getColumnFindingGroups({
rowSelection: {},
selectableRowCount: 1,
onDrillDown,
onDrillDown: vi.fn(),
});
const selectColumn = columns.find(
@@ -240,7 +206,7 @@ function renderSelectCell(overrides?: Partial<FindingGroupRow>) {
</div>,
);
return { onDrillDown, toggleSelected };
return { toggleSelected };
}
// ---------------------------------------------------------------------------
@@ -265,15 +231,6 @@ describe("column-finding-groups — accessibility of check title cell", () => {
expect(impactedProvidersColumn).toBeUndefined();
});
it("should render the first provider icon with its provider name", () => {
// Given
renderFindingGroupTitleCell({ providers: ["iac"] });
// Then
expect(screen.getByTestId("provider-icon-iac")).toBeInTheDocument();
expect(screen.getByText("Infrastructure as Code")).toBeInTheDocument();
});
it("should render the check title as a button element (not a <p>)", () => {
// Given
const onDrillDown =
@@ -375,47 +332,6 @@ describe("column-finding-groups — accessibility of check title cell", () => {
}),
);
});
it("should keep zero-resource fallback groups non-clickable even when fallback counts are present", () => {
// Given
const onDrillDown =
vi.fn<(checkId: string, group: FindingGroupRow) => void>();
renderFindingCell("Fallback IaC Check", onDrillDown, {
resourcesTotal: 0,
resourcesFail: 0,
failCount: 0,
passCount: 2,
manualCount: 1,
});
// Then
expect(
screen.queryByRole("button", { name: "Fallback IaC Check" }),
).not.toBeInTheDocument();
expect(screen.getByText("Fallback IaC Check")).toBeInTheDocument();
expect(onDrillDown).not.toHaveBeenCalled();
});
it("should keep fallback groups non-clickable when the displayed total is zero", () => {
// Given
const onDrillDown =
vi.fn<(checkId: string, group: FindingGroupRow) => void>();
// When
renderFindingCell("No failing findings", onDrillDown, {
resourcesTotal: 0,
resourcesFail: 0,
failCount: 0,
passCount: 0,
});
// Then
expect(
screen.queryByRole("button", { name: "No failing findings" }),
).not.toBeInTheDocument();
expect(screen.getByText("No failing findings")).toBeInTheDocument();
});
});
describe("column-finding-groups — impacted resources count", () => {
@@ -429,36 +345,6 @@ describe("column-finding-groups — impacted resources count", () => {
// Then
expect(screen.getByText("3/5")).toBeInTheDocument();
});
it("should fall back to finding counts when resources total is zero", () => {
// Given/When
renderImpactedResourcesCell({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 3,
passCount: 2,
muted: false,
});
// Then
expect(screen.getByText("3/5")).toBeInTheDocument();
});
it("should include muted findings in the denominator when the row is muted", () => {
// Given/When
renderImpactedResourcesCell({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 3,
passCount: 2,
failMutedCount: 4,
passMutedCount: 1,
muted: true,
});
// Then
expect(screen.getByText("3/10")).toBeInTheDocument();
});
});
describe("column-finding-groups — group selection", () => {
@@ -471,42 +357,6 @@ describe("column-finding-groups — group selection", () => {
expect(screen.getByRole("checkbox", { name: "Select row" })).toBeDisabled();
});
it("should hide the chevron for zero-resource fallback groups even when fallback counts are present", () => {
// Given
const { onDrillDown } = renderSelectCell({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 0,
passCount: 2,
manualCount: 1,
});
// Then
expect(
screen.queryByRole("button", {
name: "Expand S3 Bucket Public Access",
}),
).not.toBeInTheDocument();
expect(onDrillDown).not.toHaveBeenCalled();
});
it("should hide the chevron for zero-resource groups when the displayed total is zero", () => {
// Given/When
renderSelectCell({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 0,
passCount: 0,
});
// Then
expect(
screen.queryByRole("button", {
name: "Expand S3 Bucket Public Access",
}),
).not.toBeInTheDocument();
});
});
describe("column-finding-groups — indicators", () => {
@@ -4,11 +4,6 @@ import { ColumnDef, RowSelectionState } from "@tanstack/react-table";
import { ChevronRight } from "lucide-react";
import { Checkbox } from "@/components/shadcn";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn/tooltip";
import {
DataTableColumnHeader,
SeverityBadge,
@@ -16,19 +11,15 @@ import {
} from "@/components/ui/table";
import { cn } from "@/lib";
import {
canDrillDownFindingGroup,
getFilteredFindingGroupDelta,
getFindingGroupImpactedCounts,
isFindingGroupMuted,
} from "@/lib/findings-groups";
import { FindingGroupRow } from "@/types";
import { getProviderDisplayName } from "@/types/providers";
import { DataTableRowActions } from "./data-table-row-actions";
import { canMuteFindingGroup } from "./finding-group-selection";
import { ImpactedResourcesCell } from "./impacted-resources-cell";
import { NotificationIndicator } from "./notification-indicator";
import { ProviderIconCell } from "./provider-icon-cell";
import { DeltaValues, NotificationIndicator } from "./notification-indicator";
interface GetColumnFindingGroupsOptions {
rowSelection: RowSelectionState;
@@ -92,7 +83,14 @@ export function getColumnFindingGroups({
const allMuted = isFindingGroupMuted(group);
const isExpanded = expandedCheckId === group.checkId;
const deltaKey = getFilteredFindingGroupDelta(group, filters);
const canExpand = canDrillDownFindingGroup(group);
const delta =
deltaKey === "new"
? DeltaValues.NEW
: deltaKey === "changed"
? DeltaValues.CHANGED
: DeltaValues.NONE;
const canExpand = group.resourcesTotal > 0;
const canSelect = canMuteFindingGroup({
resourcesFail: group.resourcesFail,
resourcesTotal: group.resourcesTotal,
@@ -103,7 +101,7 @@ export function getColumnFindingGroups({
return (
<div className="flex items-center gap-2">
<NotificationIndicator
delta={deltaKey}
delta={delta}
isMuted={allMuted}
showDeltaWhenMuted
/>
@@ -177,43 +175,23 @@ export function getColumnFindingGroups({
),
cell: ({ row }) => {
const group = row.original;
const canExpand = canDrillDownFindingGroup(group);
const provider = group.providers[0];
const providerName = provider
? getProviderDisplayName(provider)
: undefined;
const canExpand = group.resourcesTotal > 0;
return (
<div className="flex items-center gap-2">
{provider && providerName ? (
<Tooltip>
<TooltipTrigger asChild>
<div className="shrink-0">
<ProviderIconCell
provider={provider}
size={20}
className="size-5 rounded-none bg-transparent"
/>
</div>
</TooltipTrigger>
<TooltipContent side="top">{providerName}</TooltipContent>
</Tooltip>
) : null}
<div>
{canExpand ? (
<button
type="button"
className="text-text-neutral-primary hover:text-button-tertiary w-full cursor-pointer border-none bg-transparent p-0 text-left text-sm break-words whitespace-normal hover:underline"
onClick={() => onDrillDown(group.checkId, group)}
>
{group.checkTitle}
</button>
) : (
<span className="text-text-neutral-primary w-full text-left text-sm break-words whitespace-normal">
{group.checkTitle}
</span>
)}
</div>
<div>
{canExpand ? (
<button
type="button"
className="text-text-neutral-primary hover:text-button-tertiary w-full cursor-pointer border-none bg-transparent p-0 text-left text-sm break-words whitespace-normal hover:underline"
onClick={() => onDrillDown(group.checkId, group)}
>
{group.checkTitle}
</button>
) : (
<span className="text-text-neutral-primary w-full text-left text-sm break-words whitespace-normal">
{group.checkTitle}
</span>
)}
</div>
);
},
@@ -238,11 +216,10 @@ export function getColumnFindingGroups({
),
cell: ({ row }) => {
const group = row.original;
const counts = getFindingGroupImpactedCounts(group);
return (
<ImpactedResourcesCell
impacted={counts.impacted}
total={counts.total}
impacted={group.resourcesFail}
total={group.resourcesTotal}
/>
);
},
@@ -1,15 +1,15 @@
"use client";
import { ColumnDef } from "@tanstack/react-table";
import { Container } from "lucide-react";
import { Database } from "lucide-react";
import { DateWithTime, EntityInfo } from "@/components/ui/entities";
import { CodeSnippet } from "@/components/ui/code-snippet/code-snippet";
import { DateWithTime } from "@/components/ui/entities";
import {
DataTableColumnHeader,
SeverityBadge,
StatusFindingBadge,
} from "@/components/ui/table";
import { getRegionFlag } from "@/lib/region-flags";
import { FindingProps, ProviderType } from "@/types";
import { FindingDetailDrawer } from "./finding-detail-drawer";
@@ -126,25 +126,18 @@ export function getStandaloneFindingColumns({
<DataTableColumnHeader column={column} title="Resource name" />
),
cell: ({ row }) => {
const name = getResourceData(row, "name");
const uid = getResourceData(row, "uid");
const entityAlias =
typeof name === "string" && name.trim().length > 0 && name !== "-"
? name
: undefined;
const entityId =
typeof uid === "string" && uid.trim().length > 0 && uid !== "-"
? uid
: undefined;
const resourceName = getResourceData(row, "name");
if (resourceName === "-") {
return <p className="text-text-neutral-primary text-sm">-</p>;
}
return (
<div className="max-w-[240px]">
<EntityInfo
nameIcon={<Container className="size-4" />}
entityAlias={entityAlias}
entityId={entityId}
/>
</div>
<CodeSnippet
value={resourceName as string}
formatter={(value: string) => `...${value.slice(-10)}`}
icon={<Database size={16} />}
/>
);
},
enableSorting: false,
@@ -168,17 +161,12 @@ export function getStandaloneFindingColumns({
{
accessorKey: "provider",
header: ({ column }) => (
<DataTableColumnHeader column={column} title="Cloud Provider" />
<DataTableColumnHeader column={column} title="Provider" />
),
cell: ({ row }) => {
const provider = getProviderData(row, "provider");
return (
<ProviderIconCell
provider={provider as ProviderType}
className="size-8"
/>
);
return <ProviderIconCell provider={provider as ProviderType} />;
},
enableSorting: false,
},
@@ -205,17 +193,10 @@ export function getStandaloneFindingColumns({
cell: ({ row }) => {
const region = getResourceData(row, "region");
const regionText = typeof region === "string" ? region : "-";
const regionFlag =
typeof region === "string" ? getRegionFlag(region) : "";
return (
<span className="text-text-neutral-primary flex max-w-[140px] items-center gap-1.5 truncate text-sm">
{regionFlag && (
<span className="translate-y-px text-base leading-none">
{regionFlag}
</span>
)}
<span className="truncate">{regionText}</span>
</span>
<p className="text-text-neutral-primary max-w-[120px] truncate text-sm">
{regionText}
</p>
);
},
enableSorting: false,
@@ -30,6 +30,7 @@ export function FindingDetailDrawer({
}: FindingDetailDrawerProps) {
const drawer = useResourceDetailDrawer({
resources: [findingToFindingResourceRow(finding)],
checkId: finding.attributes.check_id,
totalResourceCount: 1,
initialIndex: defaultOpen || inline ? 0 : null,
});
@@ -62,7 +63,6 @@ export function FindingDetailDrawer({
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentResource={drawer.currentResource}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
onNavigatePrev={drawer.navigatePrev}
@@ -87,7 +87,6 @@ export function FindingDetailDrawer({
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentResource={drawer.currentResource}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
onNavigatePrev={drawer.navigatePrev}
@@ -22,7 +22,6 @@ import { useFindingGroupResourceState } from "@/hooks/use-finding-group-resource
import { cn, hasHistoricalFindingFilter } from "@/lib";
import {
getFilteredFindingGroupDelta,
getFindingGroupImpactedCounts,
isFindingGroupMuted,
} from "@/lib/findings-groups";
import { FindingGroupRow } from "@/types";
@@ -31,8 +30,7 @@ import { FloatingMuteButton } from "../floating-mute-button";
import { getColumnFindingResources } from "./column-finding-resources";
import { FindingsSelectionContext } from "./findings-selection-context";
import { ImpactedResourcesCell } from "./impacted-resources-cell";
import { getFindingGroupEmptyStateMessage } from "./inline-resource-container.utils";
import { NotificationIndicator } from "./notification-indicator";
import { DeltaValues, NotificationIndicator } from "./notification-indicator";
import { ResourceDetailDrawer } from "./resource-detail-drawer";
interface FindingsGroupDrillDownProps {
@@ -98,8 +96,14 @@ export function FindingsGroupDrillDown({
// Delta for the sticky header
const deltaKey = getFilteredFindingGroupDelta(group, filters);
const delta =
deltaKey === "new"
? DeltaValues.NEW
: deltaKey === "changed"
? DeltaValues.CHANGED
: DeltaValues.NONE;
const allMuted = isFindingGroupMuted(group);
const impactedCounts = getFindingGroupImpactedCounts(group);
const rows = table.getRowModel().rows;
@@ -135,7 +139,7 @@ export function FindingsGroupDrillDown({
{/* Notification indicator */}
<NotificationIndicator
delta={deltaKey}
delta={delta}
isMuted={allMuted}
showDeltaWhenMuted
/>
@@ -155,8 +159,8 @@ export function FindingsGroupDrillDown({
{/* Impacted resources count */}
<ImpactedResourcesCell
impacted={impactedCounts.impacted}
total={impactedCounts.total}
impacted={group.resourcesFail}
total={group.resourcesTotal}
/>
</div>
</div>
@@ -205,7 +209,9 @@ export function FindingsGroupDrillDown({
colSpan={columns.length}
className="h-24 text-center"
>
{getFindingGroupEmptyStateMessage(group, filters)}
{Object.keys(filters).length > 0
? "No resources found for the selected filters."
: "No resources found."}
</TableCell>
</TableRow>
) : null}
@@ -242,10 +248,8 @@ export function FindingsGroupDrillDown({
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentResource={drawer.currentResource}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
showSyntheticResourceHint={group.resourcesTotal === 0}
onNavigatePrev={drawer.navigatePrev}
onNavigateNext={drawer.navigateNext}
onMuteComplete={handleDrawerMuteComplete}
@@ -6,7 +6,6 @@ import { useRef, useState } from "react";
import { resolveFindingIdsByVisibleGroupResources } from "@/actions/findings/findings-by-resource";
import { DataTable } from "@/components/ui/table";
import { canDrillDownFindingGroup } from "@/lib/findings-groups";
import { FindingGroupRow, MetaDataProps } from "@/types";
import { FloatingMuteButton } from "../floating-mute-button";
@@ -141,7 +140,7 @@ export function FindingsGroupTable({
const handleDrillDown = (checkId: string, group: FindingGroupRow) => {
// No resources in the group → nothing to show, skip drill-down
if (!canDrillDownFindingGroup(group)) return;
if (group.resourcesTotal === 0) return;
// Toggle: same group = collapse, different = switch
if (expandedCheckId === checkId) {
@@ -20,7 +20,6 @@ import { getColumnFindingResources } from "./column-finding-resources";
import { FindingsSelectionContext } from "./findings-selection-context";
import {
getFilteredFindingGroupResourceCount,
getFindingGroupEmptyStateMessage,
getFindingGroupSkeletonCount,
} from "./inline-resource-container.utils";
import { ResourceDetailDrawer } from "./resource-detail-drawer";
@@ -279,7 +278,9 @@ export function InlineResourceContainer({
colSpan={columns.length}
className="h-24 text-center"
>
{getFindingGroupEmptyStateMessage(group, filters)}
{Object.keys(filters).length > 0
? "No resources found for the selected filters."
: "No resources found."}
</TableCell>
</TableRow>
)}
@@ -333,10 +334,8 @@ export function InlineResourceContainer({
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentResource={drawer.currentResource}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
showSyntheticResourceHint={group.resourcesTotal === 0}
onNavigatePrev={drawer.navigatePrev}
onNavigateNext={drawer.navigateNext}
onMuteComplete={handleDrawerMuteComplete}
@@ -4,7 +4,6 @@ import type { FindingGroupRow } from "@/types";
import {
getFilteredFindingGroupResourceCount,
getFindingGroupEmptyStateMessage,
getFindingGroupSkeletonCount,
isFailOnlyStatusFilter,
} from "./inline-resource-container.utils";
@@ -100,47 +99,3 @@ describe("getFindingGroupSkeletonCount", () => {
).toBe(1);
});
});
describe("getFindingGroupEmptyStateMessage", () => {
it("returns the muted hint when muted findings are excluded and no visible resources remain", () => {
expect(
getFindingGroupEmptyStateMessage(
makeGroup({
resourcesTotal: 0,
resourcesFail: 0,
mutedCount: 1,
failCount: 0,
passCount: 0,
}),
{
"filter[status]": "FAIL",
"filter[muted]": "false",
},
),
).toBe(
"No resources match the current filters. Try enabling Include muted to view muted findings.",
);
});
it("keeps the generic filtered empty state when muted findings are already included", () => {
expect(
getFindingGroupEmptyStateMessage(
makeGroup({
resourcesTotal: 0,
resourcesFail: 0,
mutedCount: 1,
}),
{
"filter[status]": "FAIL",
"filter[muted]": "include",
},
),
).toBe("No resources found for the selected filters.");
});
it("keeps the generic empty state when no filters are active", () => {
expect(getFindingGroupEmptyStateMessage(makeGroup(), {})).toBe(
"No resources found.",
);
});
});
@@ -33,18 +33,6 @@ export function isFailOnlyStatusFilter(
return multiStatusValues.length === 1 && multiStatusValues[0] === "FAIL";
}
function includesMutedFindings(
filters: Record<string, string | string[] | undefined>,
): boolean {
const mutedFilter = filters["filter[muted]"];
if (Array.isArray(mutedFilter)) {
return mutedFilter.includes("include");
}
return mutedFilter === "include";
}
export function getFilteredFindingGroupResourceCount(
group: FindingGroupRow,
filters: Record<string, string | string[] | undefined>,
@@ -65,24 +53,3 @@ export function getFindingGroupSkeletonCount(
// empty state ("No resources found") replaces the skeleton.
return Math.max(1, Math.min(filteredTotal, maxSkeletonRows));
}
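A quick illustration of the clamp above; the maxSkeletonRows value of 5 is assumed for the example only:

const clampSkeletonRows = (filteredTotal: number, maxSkeletonRows = 5) =>
  Math.max(1, Math.min(filteredTotal, maxSkeletonRows));

clampSkeletonRows(0);  // 1 (never fewer than one skeleton row)
clampSkeletonRows(3);  // 3 (within the cap, passes through)
clampSkeletonRows(12); // 5 (capped at maxSkeletonRows)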
export function getFindingGroupEmptyStateMessage(
group: FindingGroupRow,
filters: Record<string, string | string[] | undefined>,
): string {
const hasFilters = Object.keys(filters).length > 0;
if (!hasFilters) {
return "No resources found.";
}
const mutedExcluded = !includesMutedFindings(filters);
const hasMutedFindings = (group.mutedCount ?? 0) > 0;
const visibleCount = getFilteredFindingGroupResourceCount(group, filters);
if (mutedExcluded && hasMutedFindings && visibleCount === 0) {
return "No resources match the current filters. Try enabling Include muted to view muted findings.";
}
return "No resources found for the selected filters.";
}
@@ -17,11 +17,14 @@ import {
} from "@/components/shadcn/tooltip";
import { DOCS_URLS } from "@/lib/external-urls";
import { cn } from "@/lib/utils";
import { FINDING_DELTA, type FindingDelta } from "@/types";
export const DeltaValues = FINDING_DELTA;
export const DeltaValues = {
NEW: "new",
CHANGED: "changed",
NONE: "none",
} as const;
export type DeltaType = Exclude<FindingDelta, null>;
export type DeltaType = (typeof DeltaValues)[keyof typeof DeltaValues];
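The `as const` object plus an indexed-access type is a standard way to derive a string-literal union that stays in sync with the object's values; a minimal sketch with hypothetical names:

const Modes = { OPEN: "open", CLOSED: "closed" } as const;
type Mode = (typeof Modes)[keyof typeof Modes]; // "open" | "closed"

const ok: Mode = Modes.OPEN;
// const bad: Mode = "pending"; // compile error: not assignable to type Mode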
interface NotificationIndicatorProps {
delta?: DeltaType;
@@ -121,12 +124,12 @@ function MutedIndicator({ mutedReason }: { mutedReason?: string }) {
<PopoverTrigger asChild>
<button
type="button"
className="flex w-5 shrink-0 cursor-pointer items-center justify-center bg-transparent p-0"
className="flex w-4 shrink-0 cursor-pointer items-center justify-center bg-transparent p-0"
onClick={(e) => e.stopPropagation()}
onMouseEnter={() => setOpen(true)}
onMouseLeave={() => setOpen(false)}
>
<MutedIcon className="text-bg-data-muted size-3" />
<MutedIcon className="text-bg-data-muted size-2" />
</button>
</PopoverTrigger>
<PopoverContent
@@ -14,14 +14,12 @@ const {
mockWindowOpen,
mockClipboardWriteText,
mockSearchParamsState,
mockNotificationIndicator,
} = vi.hoisted(() => ({
mockGetComplianceIcon: vi.fn((_: string) => null as string | null),
mockGetCompliancesOverview: vi.fn(),
mockWindowOpen: vi.fn(),
mockClipboardWriteText: vi.fn(),
mockSearchParamsState: { value: "" },
mockNotificationIndicator: vi.fn(),
}));
vi.mock("next/navigation", () => ({
@@ -136,12 +134,7 @@ vi.mock("@/components/shadcn/dropdown", () => ({
}));
vi.mock("@/components/shadcn/skeleton/skeleton", () => ({
Skeleton: ({
className,
...props
}: HTMLAttributes<HTMLDivElement> & { className?: string }) => (
<div data-testid="inline-skeleton" className={className} {...props} />
),
Skeleton: () => <div />,
}));
vi.mock("@/components/shadcn/spinner/spinner", () => ({
@@ -300,11 +293,7 @@ vi.mock("../delta-indicator", () => ({
}));
vi.mock("../notification-indicator", () => ({
NotificationIndicator: (props: Record<string, unknown>) => {
mockNotificationIndicator(props);
return null;
},
DeltaValues: { NEW: "new", CHANGED: "changed", NONE: "none" } as const,
NotificationIndicator: () => null,
}));
vi.mock("./resource-detail-skeleton", () => ({
@@ -320,7 +309,6 @@ vi.mock("../../muted", () => ({
// ---------------------------------------------------------------------------
import type { ResourceDrawerFinding } from "@/actions/findings";
import type { FindingResourceRow } from "@/types";
import { ResourceDetailDrawerContent } from "./resource-detail-drawer-content";
import type { CheckMeta } from "./use-resource-detail-drawer";
@@ -386,29 +374,6 @@ const mockFinding: ResourceDrawerFinding = {
scan: null,
};
const mockResourceRow: FindingResourceRow = {
id: "row-1",
rowType: "resource",
findingId: "finding-1",
checkId: "s3_check",
providerType: "aws",
providerAlias: "prod",
providerUid: "123456789",
resourceName: "my-bucket",
resourceType: "Bucket",
resourceGroup: "default",
resourceUid: "arn:aws:s3:::bucket",
service: "s3",
region: "us-east-1",
severity: "critical",
status: "FAIL",
delta: null,
isMuted: false,
mutedReason: undefined,
firstSeenAt: null,
lastSeenAt: null,
};
// ---------------------------------------------------------------------------
// Fix 1: Lighthouse AI button text change
// ---------------------------------------------------------------------------
@@ -972,461 +937,3 @@ describe("ResourceDetailDrawerContent — other findings mute refresh", () => {
expect(onMuteComplete).not.toHaveBeenCalled();
});
});
describe("ResourceDetailDrawerContent — synthetic resource empty state", () => {
it("should explain that simulated IaC resources never have other findings", () => {
// Given/When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating={false}
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={1}
currentFinding={mockFinding}
otherFindings={[]}
showSyntheticResourceHint
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(
screen.getByText(
"No other findings are available for this IaC resource.",
),
).toBeInTheDocument();
});
});
describe("ResourceDetailDrawerContent — current resource row display", () => {
it("should render resource card fields from the current resource row instead of the fetched finding", () => {
// Given
const currentResource: FindingResourceRow = {
...mockResourceRow,
providerAlias: "row-account",
providerUid: "row-provider-uid",
resourceName: "row-resource-name",
resourceUid: "row-resource-uid",
service: "row-service",
region: "eu-west-1",
resourceType: "row-type",
resourceGroup: "row-group",
severity: "low",
status: "PASS",
};
const fetchedFinding: ResourceDrawerFinding = {
...mockFinding,
providerAlias: "finding-account",
providerUid: "finding-provider-uid",
resourceName: "finding-resource-name",
resourceUid: "finding-resource-uid",
resourceService: "finding-service",
resourceRegion: "ap-south-1",
resourceType: "finding-type",
resourceGroup: "finding-group",
severity: "critical",
status: "FAIL",
};
// When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating={false}
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={1}
currentResource={currentResource}
currentFinding={fetchedFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(screen.getByText("row-service")).toBeInTheDocument();
expect(screen.getByText("eu-west-1")).toBeInTheDocument();
expect(screen.getByText("row-group")).toBeInTheDocument();
expect(screen.getByText("row-type")).toBeInTheDocument();
expect(screen.getByText("FAIL")).toBeInTheDocument();
expect(screen.getByText("critical")).toBeInTheDocument();
expect(screen.queryByText("finding-service")).not.toBeInTheDocument();
expect(screen.queryByText("ap-south-1")).not.toBeInTheDocument();
expect(screen.queryByText("finding-group")).not.toBeInTheDocument();
expect(screen.queryByText("finding-type")).not.toBeInTheDocument();
});
it("should prefer the fetched finding status and severity in the header when the current row is stale", () => {
// Given
const currentResource: FindingResourceRow = {
...mockResourceRow,
severity: "critical",
status: "FAIL",
isMuted: false,
};
const fetchedFinding: ResourceDrawerFinding = {
...mockFinding,
severity: "low",
status: "PASS",
isMuted: true,
mutedReason: "Muted after refresh",
};
// When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating={false}
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={1}
currentResource={currentResource}
currentFinding={fetchedFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(screen.getByText("PASS")).toBeInTheDocument();
expect(screen.getByText("low")).toBeInTheDocument();
expect(screen.queryByText("FAIL")).not.toBeInTheDocument();
expect(screen.queryByText("critical")).not.toBeInTheDocument();
});
});
describe("ResourceDetailDrawerContent — header skeleton while navigating", () => {
it("should keep row-backed navigation chrome visible while hiding stale finding details during carousel navigation", () => {
// Given
const currentResource: FindingResourceRow = {
...mockResourceRow,
checkId: mockCheckMeta.checkId,
resourceName: "next-bucket",
resourceUid: "next-resource-uid",
service: "ec2",
region: "eu-west-1",
resourceType: "Instance",
resourceGroup: "row-group",
severity: "low",
status: "PASS",
findingId: "finding-2",
};
// When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={2}
currentResource={currentResource}
currentFinding={mockFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(screen.getByText("PASS")).toBeInTheDocument();
expect(screen.getByText("low")).toBeInTheDocument();
expect(screen.getByText("ec2")).toBeInTheDocument();
expect(screen.getByText("eu-west-1")).toBeInTheDocument();
expect(screen.getByText("row-group")).toBeInTheDocument();
expect(
screen.getByRole("button", { name: "Finding Overview" }),
).toBeInTheDocument();
expect(
screen.getByRole("button", { name: "Other Findings For This Resource" }),
).toBeInTheDocument();
expect(screen.queryByText("uid-1")).not.toBeInTheDocument();
expect(screen.queryByText("Status extended")).not.toBeInTheDocument();
expect(screen.queryByText("FAIL")).not.toBeInTheDocument();
expect(screen.queryByText("critical")).not.toBeInTheDocument();
});
it("should skeletonize stale check-level header content when navigating to a different check", () => {
// Given
const currentResource: FindingResourceRow = {
...mockResourceRow,
checkId: "ec2_check",
findingId: "finding-2",
severity: "low",
status: "PASS",
};
// When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={2}
currentResource={currentResource}
currentFinding={mockFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(screen.getByTestId("drawer-header-skeleton")).toBeInTheDocument();
expect(screen.queryByText("S3 Check")).not.toBeInTheDocument();
expect(screen.queryByText("PCI-DSS")).not.toBeInTheDocument();
expect(screen.getByText("PASS")).toBeInTheDocument();
expect(screen.getByText("low")).toBeInTheDocument();
});
it("should keep same-check overview sections visible while hiding stale finding-specific details during navigation", () => {
// Given/When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={2}
currentResource={mockResourceRow}
currentFinding={mockFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(screen.getByText("Risk:")).toBeInTheDocument();
expect(screen.getByText("Description:")).toBeInTheDocument();
expect(screen.getByText("Remediation:")).toBeInTheDocument();
expect(screen.getByText("security")).toBeInTheDocument();
expect(screen.queryByText("Status Extended:")).not.toBeInTheDocument();
expect(screen.queryByText("uid-1")).not.toBeInTheDocument();
expect(
screen.queryByRole("link", {
name: "Analyze This Finding With Lighthouse AI",
}),
).not.toBeInTheDocument();
});
it("should keep the overview tab shell visible with section skeletons when navigating to a different check", () => {
// Given
const currentResource: FindingResourceRow = {
...mockResourceRow,
checkId: "ec2_check",
findingId: "finding-2",
severity: "low",
status: "PASS",
};
// When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={2}
currentResource={currentResource}
currentFinding={mockFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(
screen.getByTestId("overview-navigation-skeleton"),
).toBeInTheDocument();
expect(screen.queryByText("Risk:")).not.toBeInTheDocument();
expect(screen.queryByText("Description:")).not.toBeInTheDocument();
expect(screen.queryByText("Remediation:")).not.toBeInTheDocument();
expect(
screen.getByRole("button", { name: "Finding Overview" }),
).toBeInTheDocument();
expect(
screen.getByRole("button", { name: "Other Findings For This Resource" }),
).toBeInTheDocument();
});
it("should keep other findings table headers visible while skeletonizing only the rows during navigation", () => {
// Given/When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={2}
currentResource={mockResourceRow}
currentFinding={mockFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(screen.getByText("Status")).toBeInTheDocument();
expect(screen.getByText("Finding")).toBeInTheDocument();
expect(screen.getByText("Severity")).toBeInTheDocument();
expect(screen.getByText("Time")).toBeInTheDocument();
expect(
screen.getByTestId("other-findings-total-entries-skeleton"),
).toBeInTheDocument();
expect(
screen.getByTestId("other-findings-navigation-skeleton"),
).toBeInTheDocument();
});
it("should keep scans labels visible while skeletonizing only the scan values during navigation", () => {
// Given/When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={2}
currentResource={mockResourceRow}
currentFinding={mockFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(
screen.getByText("Showing the latest scan that evaluated this finding"),
).toBeInTheDocument();
expect(screen.getByText("Scan Name")).toBeInTheDocument();
expect(screen.getByText("Resources Scanned")).toBeInTheDocument();
expect(screen.getByText("Progress")).toBeInTheDocument();
expect(screen.getByText("Trigger")).toBeInTheDocument();
expect(screen.getByText("State")).toBeInTheDocument();
expect(screen.getByText("Duration")).toBeInTheDocument();
expect(screen.getByText("Started At")).toBeInTheDocument();
expect(screen.getByText("Completed At")).toBeInTheDocument();
expect(screen.getByText("Launched At")).toBeInTheDocument();
expect(screen.getByText("Scheduled At")).toBeInTheDocument();
expect(screen.getByTestId("scans-navigation-skeleton")).toBeInTheDocument();
});
it("should keep the events tab shell visible while showing timeline row skeletons during navigation", () => {
// Given/When
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={2}
currentResource={mockResourceRow}
currentFinding={mockFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// Then
expect(screen.getByRole("button", { name: "Events" })).toBeInTheDocument();
expect(
screen.getByTestId("events-navigation-skeleton"),
).toBeInTheDocument();
});
});
describe("ResourceDetailDrawerContent — other findings delta/muted indicator", () => {
const renderWithOtherFinding = (
overrides: Partial<ResourceDrawerFinding>,
) => {
const otherFinding: ResourceDrawerFinding = {
...mockFinding,
id: "finding-2",
uid: "uid-2",
checkId: "ec2_check",
checkTitle: "EC2 Check",
...overrides,
};
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating={false}
checkMeta={mockCheckMeta}
currentIndex={0}
totalResources={1}
currentFinding={mockFinding}
otherFindings={[otherFinding]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
};
const lastNotificationIndicatorPropsForOtherRow = () => {
const calls = mockNotificationIndicator.mock.calls;
// Last call corresponds to the other-finding row (current finding row renders first).
return calls[calls.length - 1][0];
};
it("should forward delta='new' to the NotificationIndicator for a new other finding", () => {
renderWithOtherFinding({ delta: "new" });
expect(lastNotificationIndicatorPropsForOtherRow()).toMatchObject({
delta: "new",
isMuted: false,
showDeltaWhenMuted: true,
});
});
it("should forward delta='changed' to the NotificationIndicator for a changed other finding", () => {
renderWithOtherFinding({ delta: "changed" });
expect(lastNotificationIndicatorPropsForOtherRow()).toMatchObject({
delta: "changed",
});
});
it("should pass delta=undefined when the finding has delta='none'", () => {
renderWithOtherFinding({ delta: "none" });
expect(lastNotificationIndicatorPropsForOtherRow()).toMatchObject({
delta: undefined,
});
});
it("should forward mutedReason and keep delta when a muted other finding is also new", () => {
renderWithOtherFinding({
delta: "new",
isMuted: true,
mutedReason: "False positive",
});
expect(lastNotificationIndicatorPropsForOtherRow()).toMatchObject({
delta: "new",
isMuted: true,
mutedReason: "False positive",
showDeltaWhenMuted: true,
});
});
});
File diff suppressed because it is too large
@@ -11,7 +11,6 @@ import {
DrawerHeader,
DrawerTitle,
} from "@/components/shadcn";
import type { FindingResourceRow } from "@/types";
import { ResourceDetailDrawerContent } from "./resource-detail-drawer-content";
import type { CheckMeta } from "./use-resource-detail-drawer";
@@ -24,10 +23,8 @@ interface ResourceDetailDrawerProps {
checkMeta: CheckMeta | null;
currentIndex: number;
totalResources: number;
currentResource: FindingResourceRow | null;
currentFinding: ResourceDrawerFinding | null;
otherFindings: ResourceDrawerFinding[];
showSyntheticResourceHint?: boolean;
onNavigatePrev: () => void;
onNavigateNext: () => void;
onMuteComplete: () => void;
@@ -41,10 +38,8 @@ export function ResourceDetailDrawer({
checkMeta,
currentIndex,
totalResources,
currentResource,
currentFinding,
otherFindings,
showSyntheticResourceHint = false,
onNavigatePrev,
onNavigateNext,
onMuteComplete,
@@ -69,10 +64,8 @@ export function ResourceDetailDrawer({
checkMeta={checkMeta}
currentIndex={currentIndex}
totalResources={totalResources}
currentResource={currentResource}
currentFinding={currentFinding}
otherFindings={otherFindings}
showSyntheticResourceHint={showSyntheticResourceHint}
onNavigatePrev={onNavigatePrev}
onNavigateNext={onNavigateNext}
onMuteComplete={onMuteComplete}

Some files were not shown because too many files have changed in this diff