Compare commits

..

45 Commits

Author SHA1 Message Date
dependabot[bot] 6a63bcfe03 chore(deps): bump python-dotenv from 1.1.1 to 1.2.2 in /mcp_server
Bumps [python-dotenv](https://github.com/theskumar/python-dotenv) from 1.1.1 to 1.2.2.
- [Release notes](https://github.com/theskumar/python-dotenv/releases)
- [Changelog](https://github.com/theskumar/python-dotenv/blob/main/CHANGELOG.md)
- [Commits](https://github.com/theskumar/python-dotenv/compare/v1.1.1...v1.2.2)

---
updated-dependencies:
- dependency-name: python-dotenv
  dependency-version: 1.2.2
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-04-21 22:06:25 +00:00
Pepe Fagoaga 4ef7bbdb7c docs: how to configure AWS SDK Default for IAM Role authentication (#10807) 2026-04-21 18:46:18 +02:00
Pepe Fagoaga f2c5d2ec87 fix(aws): fallback lookup events to resource name (#10828) 2026-04-21 18:31:50 +02:00
Adrián Peña 61a62fd6e0 fix(api): treat muted findings as resolved in finding-groups status (#10825) 2026-04-21 17:31:44 +02:00
Raajhesh Kannaa Chidambaram 39911e3ab7 feat(github): add --repo-list-file flag for GitHub scanning (#10501)
Co-authored-by: Raajhesh Kannaa Chidambaram <495042+raajheshkannaa@users.noreply.github.com>
Co-authored-by: Andoni A. <14891798+andoniaf@users.noreply.github.com>
2026-04-21 15:31:34 +02:00
Alejandro Bailo bcce8d6236 fix(ui): centralize default muted findings filter on finding groups (#10818)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2026-04-21 14:26:51 +02:00
Pablo Fernandez Guerra (PFE) 570c86948e chore: prek workspace for UI + builtin hooks + parallel execution (#10651)
Co-authored-by: Rubén De la Torre Vico <ruben@prowler.com>
Co-authored-by: Pablo F.G <pablo.fernandez@prowler.com>
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-21 13:26:07 +02:00
Adrián Peña 548389d79f perf(api): speed up finding-groups /resources endpoint (#10816) 2026-04-21 12:53:59 +02:00
Alejandro Bailo fc3066bc60 refactor(ui): redesign compliance page layout and components (#10767) 2026-04-21 12:48:57 +02:00
Pedro Martín ac6dd03fb8 feat(sdk): add universal compliance schema models and loaders (#10298) 2026-04-21 11:39:04 +02:00
Javier Grau d3a1df3473 chore(skills): centralize AI assistant config via symlinks (#9951)
Co-authored-by: Alan Buscaglia <gentlemanprogramming@gmail.com>
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2026-04-21 09:29:42 +02:00
César Arroba 858dfc2a00 fix(ci): remove broken resolved_reference step from setup-python-poetry (#10687) 2026-04-21 08:58:24 +02:00
Pepe Fagoaga 6b0ba79652 fix(changelog): relocate entries for the SDK (#10812) 2026-04-21 08:17:14 +02:00
Pablo Fernandez Guerra (PFE) 390bbdd1a6 refactor(ui): remove backward-compat redirect for legacy invitation links (#10797)
Co-authored-by: Pablo F.G <pablo.fernandez@prowler.com>
Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-21 08:11:51 +02:00
Pepe Fagoaga 8d48c26c1e chore(secrets): don't block for trufflehog (#10806) 2026-04-20 17:57:32 +02:00
Boon 98b9449e14 feat: add nginx reverse proxy configuration (#8516) (#10780)
Co-authored-by: Boon <boon@security8.work>
2026-04-20 17:30:21 +02:00
Pedro Martín 3406c5ec64 chore(skills): improve prowler-compliance (#10627) 2026-04-20 17:22:05 +02:00
Adrián Peña 4346401a0a fix(api): align latest_resources scan selection with completed_at (#10802) 2026-04-20 17:16:01 +02:00
dependabot[bot] dcec79d259 chore(deps): bump pyasn1 from 0.6.2 to 0.6.3 in /api (#10366) 2026-04-20 16:43:19 +02:00
Pepe Fagoaga 2a9c538aff chore: review changelog for v5.24.1 (#10791) 2026-04-20 14:01:29 +02:00
Pepe Fagoaga bf1b53bbd2 fix(ui): sorting and filtering for findings (#10778)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2026-04-20 13:34:31 +02:00
César Arroba 94a2ea1e8f chore: update CODEOWNERS for new team hierarchy (#10706) 2026-04-20 11:39:00 +02:00
Daniel Barranquero f7194b32de docs: remove prowler ctf page (#10782) 2026-04-20 09:37:30 +02:00
Pedro Martín 6ffe4e95bf fix(api): detect silent failures in ResourceFindingMapping (#10724)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2026-04-20 09:00:43 +02:00
Alan Buscaglia 577aa14acc fix(ui): correct IaC findings counters (#10736)
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2026-04-17 12:48:57 +02:00
Andoni Alonso 19c752c127 fix(cloudflare): guard validate_credentials against paginator infinite loops (#10771) 2026-04-17 11:23:31 +02:00
Alejandro Bailo f2d35f5885 fix(ui): exclude muted findings and polish filter selectors (#10734) 2026-04-17 11:07:22 +02:00
Josema Camacho 536e90f2a5 perf(attack-paths): cleanup task prioritization, restore default batch sizes to 1000, upgrade Cartography to 0.135.0 (#10729) 2026-04-17 10:22:30 +02:00
Daniel Barranquero 276a5d66bd feat(docs): add ctf documentation (#10761) 2026-04-16 19:35:52 +02:00
Alejandro Bailo 489c6c1073 fix: CHANGELOG minor issue (#10758) 2026-04-16 17:07:22 +02:00
Adrián Peña b08b072288 fix(api): exclude muted findings from pass_count, fail_count and manual_count (#10753) 2026-04-16 15:56:08 +02:00
Josema Camacho ca29e354b6 chore(deps): bump msgraph-sdk to 1.55.0 and azure-mgmt-resource to 24.0.0, remove marshmallow (#10733) 2026-04-16 15:34:28 +02:00
Alejandro Bailo 85a3927950 fix(ui): upgrade React 19.2.5 and Next.js 16.2.3 to mitigate CVE-2026-23869 (#10752) 2026-04-16 15:24:10 +02:00
Rubén De la Torre Vico 04fe3f65e0 chore(deps): enable Dependabot pre-commit ecosystem and bump hooks (#10732) 2026-04-16 13:38:11 +02:00
Andoni Alonso 297c9d0734 fix(sdk): move #10726 changelog entry to unreleased version (#10728) 2026-04-16 13:10:00 +02:00
Erich Blume a2a1a73749 fix(image): --registry-list crashes with AttributeError on global_provider (#10691)
Co-authored-by: Andoni A. <14891798+andoniaf@users.noreply.github.com>
2026-04-16 13:02:25 +02:00
lydiavilchez 08fbe17e29 fix(googleworkspace): treat secure Google defaults as PASS for Drive checks (#10727) 2026-04-16 13:01:55 +02:00
lydiavilchez d920f78059 fix(googleworkspace): treat secure Google defaults as PASS for Calendar checks (#10726) 2026-04-16 12:51:40 +02:00
Pepe Fagoaga 12bf3d5e70 fix(db): add missing tenant_id filter in queries (#10722) 2026-04-16 11:55:38 +02:00
Adrián Peña 4002c28b5d fix(api): add fallback handling for missing resources in findings (#10708) 2026-04-16 11:45:06 +02:00
Andoni Alonso 2439f54280 fix(sdk): allow account-scoped tokens in Cloudflare connection test (#10723) 2026-04-16 11:38:15 +02:00
Prowler Bot b0e59156e6 chore(ui): Bump version to v5.25.0 (#10711)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-04-15 20:14:46 +02:00
Prowler Bot f013bd4a53 docs: Update version to v5.24.0 (#10714)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-04-15 20:14:17 +02:00
Prowler Bot 6ad15f900f chore(release): Bump version to v5.25.0 (#10710)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-04-15 20:14:06 +02:00
Prowler Bot 1784bf38ab chore(api): Bump version to v1.26.0 (#10715)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-04-15 20:13:33 +02:00
88 changed files with 5130 additions and 929 deletions
+1 -1
View File
@@ -145,7 +145,7 @@ SENTRY_RELEASE=local
NEXT_PUBLIC_SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}
#### Prowler release version ####
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.24.3
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.25.0
# Social login credentials
SOCIAL_GOOGLE_OAUTH_CALLBACK_URL="${AUTH_URL}/api/auth/callback/google"
+12 -11
View File
@@ -1,14 +1,15 @@
# SDK
/* @prowler-cloud/sdk
/prowler/ @prowler-cloud/sdk @prowler-cloud/detection-and-remediation
/tests/ @prowler-cloud/sdk @prowler-cloud/detection-and-remediation
/dashboard/ @prowler-cloud/sdk
/docs/ @prowler-cloud/sdk
/examples/ @prowler-cloud/sdk
/util/ @prowler-cloud/sdk
/contrib/ @prowler-cloud/sdk
/permissions/ @prowler-cloud/sdk
/codecov.yml @prowler-cloud/sdk @prowler-cloud/api
/* @prowler-cloud/detection-remediation
/prowler/ @prowler-cloud/detection-remediation
/prowler/compliance/ @prowler-cloud/compliance
/tests/ @prowler-cloud/detection-remediation
/dashboard/ @prowler-cloud/detection-remediation
/docs/ @prowler-cloud/detection-remediation
/examples/ @prowler-cloud/detection-remediation
/util/ @prowler-cloud/detection-remediation
/contrib/ @prowler-cloud/detection-remediation
/permissions/ @prowler-cloud/detection-remediation
/codecov.yml @prowler-cloud/detection-remediation @prowler-cloud/api
# API
/api/ @prowler-cloud/api
@@ -17,7 +18,7 @@
/ui/ @prowler-cloud/ui
# AI
/mcp_server/ @prowler-cloud/ai
/mcp_server/ @prowler-cloud/detection-remediation
# Platform
/.github/ @prowler-cloud/platform
@@ -64,19 +64,6 @@ runs:
echo "Updated resolved_reference:"
grep -A2 -B2 "resolved_reference" poetry.lock
- name: Update SDK resolved_reference to latest commit (prowler repo on push)
if: github.event_name == 'push' && github.ref == 'refs/heads/master' && github.repository == 'prowler-cloud/prowler'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
LATEST_COMMIT=$(curl -s "https://api.github.com/repos/prowler-cloud/prowler/commits/master" | jq -r '.sha')
echo "Latest commit hash: $LATEST_COMMIT"
sed -i '/url = "https:\/\/github\.com\/prowler-cloud\/prowler\.git"/,/resolved_reference = / {
s/resolved_reference = "[a-f0-9]\{40\}"/resolved_reference = "'"$LATEST_COMMIT"'"/
}' poetry.lock
echo "Updated resolved_reference:"
grep -A2 -B2 "resolved_reference" poetry.lock
- name: Update poetry.lock (prowler repo only)
if: github.repository == 'prowler-cloud/prowler' && inputs.update-lock == 'true'
shell: bash
+12
View File
@@ -66,6 +66,18 @@ updates:
cooldown:
default-days: 7
- package-ecosystem: "pre-commit"
directory: "/"
schedule:
interval: "monthly"
open-pull-requests-limit: 25
target-branch: master
labels:
- "dependencies"
- "pre-commit"
cooldown:
default-days: 7
# Dependabot Updates are temporary disabled - 2025/04/15
# v4.6
# - package-ecosystem: "pip"
+6 -5
View File
@@ -27,11 +27,12 @@ jobs:
- name: Harden Runner
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
with:
egress-policy: block
allowed-endpoints: >
github.com:443
ghcr.io:443
pkg-containers.githubusercontent.com:443
# We can't block as Trufflehog needs to verify secrets against vendors
egress-policy: audit
# allowed-endpoints: >
# github.com:443
# ghcr.io:443
# pkg-containers.githubusercontent.com:443
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+9 -9
View File
@@ -1,7 +1,7 @@
repos:
## GENERAL
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v6.0.0
hooks:
- id: check-merge-conflict
- id: check-yaml
@@ -16,7 +16,7 @@ repos:
## TOML
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
rev: v2.13.0
rev: v2.16.0
hooks:
- id: pretty-format-toml
args: [--autofix]
@@ -24,21 +24,21 @@ repos:
## GITHUB ACTIONS
- repo: https://github.com/zizmorcore/zizmor-pre-commit
rev: v1.6.0
rev: v1.24.1
hooks:
- id: zizmor
files: ^\.github/
## BASH
- repo: https://github.com/koalaman/shellcheck-precommit
rev: v0.10.0
rev: v0.11.0
hooks:
- id: shellcheck
exclude: contrib
## PYTHON
- repo: https://github.com/myint/autoflake
rev: v2.3.1
rev: v2.3.3
hooks:
- id: autoflake
exclude: ^skills/
@@ -50,20 +50,20 @@ repos:
]
- repo: https://github.com/pycqa/isort
rev: 5.13.2
rev: 8.0.1
hooks:
- id: isort
exclude: ^skills/
args: ["--profile", "black"]
- repo: https://github.com/psf/black
rev: 24.4.2
rev: 26.3.1
hooks:
- id: black
exclude: ^skills/
- repo: https://github.com/pycqa/flake8
rev: 7.0.0
rev: 7.3.0
hooks:
- id: flake8
exclude: (contrib|^skills/)
@@ -93,7 +93,7 @@ repos:
pass_filenames: false
- repo: https://github.com/hadolint/hadolint
rev: v2.13.0-beta
rev: v2.14.0
hooks:
- id: hadolint
args: ["--ignore=DL3013"]
-6
View File
@@ -4,15 +4,9 @@ All notable changes to the **Prowler API** are documented in this file.
## [1.25.3] (Prowler v5.24.3)
### 🚀 Added
- `/overviews/findings`, `/overviews/findings-severity` and `/overviews/services` now reflect newly-muted findings without waiting for the next scan. The post-mute `reaggregate-all-finding-group-summaries` task was extended to re-run the same per-scan pipeline that scan completion runs (`ScanSummary`, `DailySeveritySummary`, `FindingGroupDailySummary`) on the latest scan of every `(provider, day)` pair, keeping the pre-aggregated tables in sync with `Finding.muted` updates [(#10827)](https://github.com/prowler-cloud/prowler/pull/10827)
### 🐞 Fixed
- Finding groups aggregated `status` now treats muted findings as resolved: a group is `FAIL` only while at least one non-muted FAIL remains, otherwise it is `PASS` (including fully-muted groups). The `filter[status]` filter and the `sort=status` ordering share the same semantics, keeping `status` consistent with `fail_count` and the orthogonal `muted` flag [(#10825)](https://github.com/prowler-cloud/prowler/pull/10825)
- `aggregate_findings` is now idempotent: it deletes the scan's existing `ScanSummary` rows before `bulk_create`, so re-runs (such as the post-mute reaggregation pipeline) no longer violate the `unique_scan_summary` constraint and no longer abort the downstream `DailySeveritySummary` / `FindingGroupDailySummary` recomputation for the affected scan [(#10827)](https://github.com/prowler-cloud/prowler/pull/10827)
- Attack Paths: Findings on AWS were silently dropped during the Neo4j merge for resources whose Cartography node is keyed by a short identifier (e.g. EC2 instances) rather than the full ARN [(#10839)](https://github.com/prowler-cloud/prowler/pull/10839)
---
+10 -10
View File
@@ -6665,7 +6665,7 @@ files = [
[[package]]
name = "prowler"
version = "5.24.0"
version = "5.25.0"
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
optional = false
python-versions = ">=3.10,<3.13"
@@ -6754,8 +6754,8 @@ uuid6 = "2024.7.10"
[package.source]
type = "git"
url = "https://github.com/prowler-cloud/prowler.git"
reference = "v5.24"
resolved_reference = "ba5b23245f4805f46d67e67fc059aefd6831f7b3"
reference = "master"
resolved_reference = "ca29e354b622198ff6a70e2ea5eb04e4a44a0903"
[[package]]
name = "psutil"
@@ -6964,11 +6964,11 @@ description = "C parser in Python"
optional = false
python-versions = ">=3.10"
groups = ["main", "dev"]
markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\""
files = [
{file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"},
{file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"},
]
markers = {main = "implementation_name != \"PyPy\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\""}
[[package]]
name = "pydantic"
@@ -7147,14 +7147,14 @@ urllib3 = ">=1.26.0"
[[package]]
name = "pygments"
version = "2.20.0"
version = "2.19.2"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
groups = ["main", "dev"]
files = [
{file = "pygments-2.20.0-py3-none-any.whl", hash = "sha256:81a9e26dd42fd28a23a2d169d86d7ac03b46e2f8b59ed4698fb4785f946d0176"},
{file = "pygments-2.20.0.tar.gz", hash = "sha256:6757cd03768053ff99f3039c1a36d6c0aa0b263438fcab17520b30a303a82b5f"},
{file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"},
{file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"},
]
[package.extras]
@@ -7216,7 +7216,7 @@ description = "The MSALRuntime Python Interop Package"
optional = false
python-versions = ">=3.6"
groups = ["main"]
markers = "sys_platform == \"win32\" and (platform_system == \"Windows\" or platform_system == \"Darwin\" or platform_system == \"Linux\")"
markers = "(platform_system == \"Windows\" or platform_system == \"Darwin\" or platform_system == \"Linux\") and sys_platform == \"win32\""
files = [
{file = "pymsalruntime-0.18.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0c22e2e83faa10de422bbfaacc1bb2887c9025ee8a53f0fc2e4f7db01c4a7b66"},
{file = "pymsalruntime-0.18.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:8ce2944a0f944833d047bb121396091e00287e2b6373716106da86ea99abf379"},
@@ -9424,4 +9424,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "5781e74b0692aed541fe445d6713d2dfd792bb226789501420aac4a8cb45aa2a"
content-hash = "a3ab982d11a87d951ff15694d2ca7fd51f1f51a451abb0baa067ccf6966367a8"
+2 -2
View File
@@ -25,7 +25,7 @@ dependencies = [
"defusedxml==0.7.1",
"gunicorn==23.0.0",
"lxml==5.3.2",
"prowler @ git+https://github.com/prowler-cloud/prowler.git@v5.24",
"prowler @ git+https://github.com/prowler-cloud/prowler.git@master",
"psycopg2-binary==2.9.9",
"pytest-celery[redis] (==1.3.0)",
"sentry-sdk[django] (==2.56.0)",
@@ -50,7 +50,7 @@ name = "prowler-api"
package-mode = false
# Needed for the SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.25.3"
version = "1.26.0"
[project.scripts]
celery = "src.backend.config.settings.celery"
+1 -1
View File
@@ -1,7 +1,7 @@
openapi: 3.0.3
info:
title: Prowler API
version: 1.25.3
version: 1.26.0
description: |-
Prowler API specification.
+1 -1
View File
@@ -417,7 +417,7 @@ class SchemaView(SpectacularAPIView):
def get(self, request, *args, **kwargs):
spectacular_settings.TITLE = "Prowler API"
spectacular_settings.VERSION = "1.25.3"
spectacular_settings.VERSION = "1.26.0"
spectacular_settings.DESCRIPTION = (
"Prowler API specification.\n\nThis file is auto-generated."
)
@@ -313,16 +313,3 @@ def sync_aws_account(
)
return failed_syncs
def extract_short_uid(uid: str) -> str:
"""Return the short identifier from an AWS ARN or resource ID.
Supported inputs end in one of:
- `<type>/<id>` (e.g. `instance/i-xxx`)
- `<type>:<id>` (e.g. `function:name`)
- `<id>` (e.g. `bucket-name` or `i-xxx`)
If `uid` is already a short resource ID, it is returned unchanged.
"""
return uid.rsplit("/", 1)[-1].rsplit(":", 1)[-1]
@@ -37,8 +37,6 @@ class ProviderConfig:
# Label for resources connected to the account node, enabling indexed finding lookups.
resource_label: str # e.g., "_AWSResource"
ingestion_function: Callable
# Maps a Postgres resource UID (e.g. full ARN) to the short-id form Cartography stores on some node types (e.g. `i-xxx` for EC2Instance).
short_uid_extractor: Callable[[str], str]
# Provider Configurations
@@ -50,7 +48,6 @@ AWS_CONFIG = ProviderConfig(
uid_field="arn",
resource_label="_AWSResource",
ingestion_function=aws.start_aws_ingestion,
short_uid_extractor=aws.extract_short_uid,
)
PROVIDER_CONFIGS: dict[str, ProviderConfig] = {
@@ -119,21 +116,6 @@ def get_provider_resource_label(provider_type: str) -> str:
return config.resource_label if config else "_UnknownProviderResource"
def _identity_short_uid(uid: str) -> str:
"""Fallback short-uid extractor for providers without a custom mapping."""
return uid
def get_short_uid_extractor(provider_type: str) -> Callable[[str], str]:
"""Get the short-uid extractor for a provider type.
Returns an identity function when the provider is unknown, so callers can
rely on a callable always being returned.
"""
config = PROVIDER_CONFIGS.get(provider_type)
return config.short_uid_extractor if config else _identity_short_uid
# Dynamic Isolation Label Helpers
# --------------------------------
@@ -8,7 +8,7 @@ This module handles:
"""
from collections import defaultdict
from typing import Any, Callable, Generator
from typing import Any, Generator
from uuid import UUID
import neo4j
@@ -21,7 +21,6 @@ from tasks.jobs.attack_paths.config import (
get_node_uid_field,
get_provider_resource_label,
get_root_node_label,
get_short_uid_extractor,
)
from tasks.jobs.attack_paths.queries import (
ADD_RESOURCE_LABEL_TEMPLATE,
@@ -58,9 +57,7 @@ _DB_QUERY_FIELDS = [
]
def _to_neo4j_dict(
record: dict[str, Any], resource_uid: str, resource_short_uid: str
) -> dict[str, Any]:
def _to_neo4j_dict(record: dict[str, Any], resource_uid: str) -> dict[str, Any]:
"""Transform a Django `.values()` record into a `dict` ready for Neo4j ingestion."""
return {
"id": str(record["id"]),
@@ -78,7 +75,6 @@ def _to_neo4j_dict(
"muted": record["muted"],
"muted_reason": record["muted_reason"],
"resource_uid": resource_uid,
"resource_short_uid": resource_short_uid,
}
@@ -174,8 +170,6 @@ def load_findings(
batch_num = 0
total_records = 0
edges_merged = 0
edges_dropped = 0
for batch in findings_batches:
batch_num += 1
batch_size = len(batch)
@@ -184,15 +178,9 @@ def load_findings(
parameters["findings_data"] = batch
logger.info(f"Loading findings batch {batch_num} ({batch_size} records)")
summary = neo4j_session.run(query, parameters).single()
if summary is not None:
edges_merged += summary.get("merged_count", 0)
edges_dropped += summary.get("dropped_count", 0)
neo4j_session.run(query, parameters)
logger.info(
f"Finished loading {total_records} records in {batch_num} batches "
f"(edges_merged={edges_merged}, edges_dropped={edges_dropped})"
)
logger.info(f"Finished loading {total_records} records in {batch_num} batches")
return total_records
@@ -217,9 +205,8 @@ def stream_findings_with_resources(
)
tenant_id = prowler_api_provider.tenant_id
short_uid_extractor = get_short_uid_extractor(prowler_api_provider.provider)
for batch in _paginate_findings(tenant_id, scan_id):
enriched = _enrich_batch_with_resources(batch, tenant_id, short_uid_extractor)
enriched = _enrich_batch_with_resources(batch, tenant_id)
if enriched:
yield enriched
@@ -282,7 +269,6 @@ def _fetch_findings_batch(
def _enrich_batch_with_resources(
findings_batch: list[dict[str, Any]],
tenant_id: str,
short_uid_extractor: Callable[[str], str],
) -> list[dict[str, Any]]:
"""
Enrich findings with their resource UIDs.
@@ -294,7 +280,7 @@ def _enrich_batch_with_resources(
resource_map = _build_finding_resource_map(finding_ids, tenant_id)
return [
_to_neo4j_dict(finding, resource_uid, short_uid_extractor(resource_uid))
_to_neo4j_dict(finding, resource_uid)
for finding in findings_batch
for resource_uid in resource_map.get(finding["id"], [])
]
@@ -35,56 +35,46 @@ INSERT_FINDING_TEMPLATE = f"""
UNWIND $findings_data AS finding_data
OPTIONAL MATCH (resource_by_uid:__RESOURCE_LABEL__ {{__NODE_UID_FIELD__: finding_data.resource_uid}})
WITH finding_data, resource_by_uid
OPTIONAL MATCH (resource_by_id:__RESOURCE_LABEL__ {{id: finding_data.resource_uid}})
WHERE resource_by_uid IS NULL
OPTIONAL MATCH (resource_by_short:__RESOURCE_LABEL__ {{id: finding_data.resource_short_uid}})
WHERE resource_by_uid IS NULL AND resource_by_id IS NULL
WITH finding_data,
resource_by_uid,
resource_by_id,
head(collect(resource_by_short)) AS resource_by_short
WITH finding_data,
COALESCE(resource_by_uid, resource_by_id, resource_by_short) AS resource
WITH finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
WHERE resource IS NOT NULL
FOREACH (_ IN CASE WHEN resource IS NOT NULL THEN [1] ELSE [] END |
MERGE (finding:{PROWLER_FINDING_LABEL} {{id: finding_data.id}})
ON CREATE SET
finding.id = finding_data.id,
finding.uid = finding_data.uid,
finding.inserted_at = finding_data.inserted_at,
finding.updated_at = finding_data.updated_at,
finding.first_seen_at = finding_data.first_seen_at,
finding.scan_id = finding_data.scan_id,
finding.delta = finding_data.delta,
finding.status = finding_data.status,
finding.status_extended = finding_data.status_extended,
finding.severity = finding_data.severity,
finding.check_id = finding_data.check_id,
finding.check_title = finding_data.check_title,
finding.muted = finding_data.muted,
finding.muted_reason = finding_data.muted_reason,
finding.firstseen = timestamp(),
finding.lastupdated = $last_updated,
finding._module_name = 'cartography:prowler',
finding._module_version = $prowler_version
ON MATCH SET
finding.status = finding_data.status,
finding.status_extended = finding_data.status_extended,
finding.lastupdated = $last_updated
MERGE (resource)-[rel:HAS_FINDING]->(finding)
ON CREATE SET
rel.firstseen = timestamp(),
rel.lastupdated = $last_updated,
rel._module_name = 'cartography:prowler',
rel._module_version = $prowler_version
ON MATCH SET
rel.lastupdated = $last_updated
)
MERGE (finding:{PROWLER_FINDING_LABEL} {{id: finding_data.id}})
ON CREATE SET
finding.id = finding_data.id,
finding.uid = finding_data.uid,
finding.inserted_at = finding_data.inserted_at,
finding.updated_at = finding_data.updated_at,
finding.first_seen_at = finding_data.first_seen_at,
finding.scan_id = finding_data.scan_id,
finding.delta = finding_data.delta,
finding.status = finding_data.status,
finding.status_extended = finding_data.status_extended,
finding.severity = finding_data.severity,
finding.check_id = finding_data.check_id,
finding.check_title = finding_data.check_title,
finding.muted = finding_data.muted,
finding.muted_reason = finding_data.muted_reason,
finding.firstseen = timestamp(),
finding.lastupdated = $last_updated,
finding._module_name = 'cartography:prowler',
finding._module_version = $prowler_version
ON MATCH SET
finding.status = finding_data.status,
finding.status_extended = finding_data.status_extended,
finding.lastupdated = $last_updated
WITH sum(CASE WHEN resource IS NOT NULL THEN 1 ELSE 0 END) AS merged_count,
sum(CASE WHEN resource IS NULL THEN 1 ELSE 0 END) AS dropped_count
RETURN merged_count, dropped_count
MERGE (resource)-[rel:HAS_FINDING]->(finding)
ON CREATE SET
rel.firstseen = timestamp(),
rel.lastupdated = $last_updated,
rel._module_name = 'cartography:prowler',
rel._module_version = $prowler_version
ON MATCH SET
rel.lastupdated = $last_updated
"""
# Internet queries (used by internet.py)
-3
View File
@@ -1198,9 +1198,6 @@ def aggregate_findings(tenant_id: str, scan_id: str):
)
for agg in aggregation
}
# Delete first so re-runs (e.g. post-mute reaggregation) don't hit
# the `unique_scan_summary` constraint.
ScanSummary.objects.filter(tenant_id=tenant_id, scan_id=scan_id).delete()
ScanSummary.objects.bulk_create(scan_aggregations, batch_size=3000)
+9 -27
View File
@@ -771,22 +771,15 @@ def aggregate_finding_group_summaries_task(tenant_id: str, scan_id: str):
)
@set_tenant(keep_tenant=True)
def reaggregate_all_finding_group_summaries_task(tenant_id: str):
"""Reaggregate every pre-aggregated summary table for this tenant.
"""Reaggregate finding group summaries for every (provider, day) combination.
Mirrors the unbounded scope of `mute_historical_findings_task`: that task
rewrites every Finding row whose UID matches a mute rule, with no time
limit. To keep the pre-aggregated tables consistent with that update,
this task re-runs the same per-scan aggregation pipeline that scan
completion runs on the latest completed scan of every (provider, day)
pair, rebuilding the three tables that power the read endpoints:
- `ScanSummary` and `DailySeveritySummary` -> `/overviews/findings`,
`/overviews/findings-severity`, `/overviews/services`.
- `FindingGroupDailySummary` -> `/finding-groups` and
`/finding-groups/latest`.
Per-scan pipelines are dispatched in parallel via a Celery group so
wallclock scales with the worker pool.
limit. To keep the daily summaries consistent with that update, this task
re-runs the aggregator on the latest completed scan of every (provider,
day) pair that exists in the database. Tasks are dispatched in parallel
via a Celery group so the wallclock scales with the worker pool, not with
the number of pairs.
"""
completed_scans = list(
Scan.objects.filter(
@@ -811,23 +804,12 @@ def reaggregate_all_finding_group_summaries_task(tenant_id: str):
scan_ids = list(latest_scans.values())
if scan_ids:
logger.info(
"Reaggregating overview/finding summaries for %d scans (provider x day)",
"Reaggregating finding group summaries for %d scans (provider x day)",
len(scan_ids),
)
# DailySeveritySummary reads from ScanSummary, so ScanSummary must be
# recomputed first; FindingGroupDailySummary reads from Finding
# directly and can run in parallel with the severity step.
group(
chain(
perform_scan_summary_task.si(tenant_id=tenant_id, scan_id=scan_id),
group(
aggregate_daily_severity_task.si(
tenant_id=tenant_id, scan_id=scan_id
),
aggregate_finding_group_summaries_task.si(
tenant_id=tenant_id, scan_id=scan_id
),
),
aggregate_finding_group_summaries_task.si(
tenant_id=tenant_id, scan_id=scan_id
)
for scan_id in scan_ids
).apply_async()
@@ -1285,12 +1285,6 @@ class TestAttackPathsFindingsHelpers:
config = SimpleNamespace(update_tag=12345)
mock_session = MagicMock()
first_result = MagicMock()
first_result.single.return_value = {"merged_count": 1, "dropped_count": 0}
second_result = MagicMock()
second_result.single.return_value = {"merged_count": 0, "dropped_count": 1}
mock_session.run.side_effect = [first_result, second_result]
with (
patch(
"tasks.jobs.attack_paths.findings.get_node_uid_field",
@@ -1300,7 +1294,6 @@ class TestAttackPathsFindingsHelpers:
"tasks.jobs.attack_paths.findings.get_provider_resource_label",
return_value="_AWSResource",
),
patch("tasks.jobs.attack_paths.findings.logger") as mock_logger,
):
findings_module.load_findings(
mock_session, findings_generator(), provider, config
@@ -1312,14 +1305,6 @@ class TestAttackPathsFindingsHelpers:
assert params["last_updated"] == config.update_tag
assert "findings_data" in params
summary_log = next(
call_args.args[0]
for call_args in mock_logger.info.call_args_list
if call_args.args and "Finished loading" in call_args.args[0]
)
assert "edges_merged=1" in summary_log
assert "edges_dropped=1" in summary_log
def test_stream_findings_with_resources_returns_latest_scan_data(
self,
tenants_fixture,
@@ -1499,12 +1484,11 @@ class TestAttackPathsFindingsHelpers:
"default",
):
result = findings_module._enrich_batch_with_resources(
[finding_dict], str(tenant.id), lambda uid: f"short:{uid}"
[finding_dict], str(tenant.id)
)
assert len(result) == 1
assert result[0]["resource_uid"] == resource.uid
assert result[0]["resource_short_uid"] == f"short:{resource.uid}"
assert result[0]["id"] == str(finding.id)
assert result[0]["status"] == "FAIL"
@@ -1588,7 +1572,7 @@ class TestAttackPathsFindingsHelpers:
"default",
):
result = findings_module._enrich_batch_with_resources(
[finding_dict], str(tenant.id), lambda uid: uid
[finding_dict], str(tenant.id)
)
assert len(result) == 3
@@ -1662,7 +1646,7 @@ class TestAttackPathsFindingsHelpers:
patch("tasks.jobs.attack_paths.findings.logger") as mock_logger,
):
result = findings_module._enrich_batch_with_resources(
[finding_dict], str(tenant.id), lambda uid: uid
[finding_dict], str(tenant.id)
)
assert len(result) == 0
@@ -1709,63 +1693,6 @@ class TestAttackPathsFindingsHelpers:
mock_session.run.assert_not_called()
@pytest.mark.parametrize(
"uid, expected",
[
(
"arn:aws:ec2:us-east-1:552455647653:instance/i-05075b63eb51baacb",
"i-05075b63eb51baacb",
),
(
"arn:aws:ec2:us-east-1:123456789012:volume/vol-0abcd1234ef567890",
"vol-0abcd1234ef567890",
),
(
"arn:aws:ec2:us-east-1:123456789012:security-group/sg-0123abcd",
"sg-0123abcd",
),
("arn:aws:s3:::my-bucket-name", "my-bucket-name"),
("arn:aws:iam::123456789012:role/MyRole", "MyRole"),
(
"arn:aws:lambda:us-east-1:123456789012:function:my-function",
"my-function",
),
("i-05075b63eb51baacb", "i-05075b63eb51baacb"),
],
)
def test_extract_short_uid_aws_variants(self, uid, expected):
from tasks.jobs.attack_paths.aws import extract_short_uid
assert extract_short_uid(uid) == expected
    def test_insert_finding_template_has_short_id_fallback(self):
        """The rendered INSERT_FINDING_TEMPLATE must try to match a resource
        three ways — full UID, node id, and short UID — and coalesce them in
        that priority order so findings still attach when only the short UID
        is present in the graph."""
        from tasks.jobs.attack_paths.queries import (
            INSERT_FINDING_TEMPLATE,
            render_cypher_template,
        )

        rendered = render_cypher_template(
            INSERT_FINDING_TEMPLATE,
            {
                "__NODE_UID_FIELD__": "arn",
                "__RESOURCE_LABEL__": "_AWSResource",
            },
        )

        # Primary match: the full resource UID against the node's UID field.
        assert (
            "resource_by_uid:_AWSResource {arn: finding_data.resource_uid}" in rendered
        )
        # Secondary match: the full UID used directly as the node id.
        assert "resource_by_id:_AWSResource {id: finding_data.resource_uid}" in rendered
        # Tertiary match: the short UID (e.g. the id extracted from an ARN).
        assert (
            "resource_by_short:_AWSResource {id: finding_data.resource_short_uid}"
            in rendered
        )
        # The short-UID match is collapsed to a single node via head(collect()).
        assert "head(collect(resource_by_short)) AS resource_by_short" in rendered
        # Fallback priority must be: full UID, then id, then short UID.
        assert (
            "COALESCE(resource_by_uid, resource_by_id, resource_by_short)" in rendered
        )
        # The template must report both merged and dropped edge counts.
        assert "RETURN merged_count, dropped_count" in rendered
class TestAddResourceLabel:
def test_add_resource_label_applies_private_label(self):
-59
View File
@@ -36,7 +36,6 @@ from api.models import (
Provider,
Resource,
Scan,
ScanSummary,
StateChoices,
StatusChoices,
)
@@ -3359,64 +3358,6 @@ class TestAggregateFindings:
regions = {s.region for s in summaries}
assert regions == {"us-east-1", "us-west-2"}
    def test_aggregate_findings_is_idempotent_on_rerun(
        self,
        tenants_fixture,
        scans_fixture,
        findings_fixture,
    ):
        """Re-running `aggregate_findings` for the same scan must not violate
        the `unique_scan_summary` constraint, and the resulting row set for
        the scan must match the single-run output. This is exercised by the
        post-mute reaggregation pipeline, which re-dispatches
        `perform_scan_summary_task` against scans whose summaries already
        exist."""
        tenant = tenants_fixture[0]
        scan = scans_fixture[0]

        # First aggregation run: capture both the row primary keys and the
        # aggregated values so the second run can be compared against them.
        aggregate_findings(str(tenant.id), str(scan.id))
        first_run_ids = set(
            ScanSummary.all_objects.filter(
                tenant_id=tenant.id, scan_id=scan.id
            ).values_list("id", flat=True)
        )
        first_run_rows = list(
            ScanSummary.all_objects.filter(tenant_id=tenant.id, scan_id=scan.id).values(
                "check_id",
                "service",
                "severity",
                "region",
                "fail",
                "_pass",
                "muted",
                "total",
            )
        )

        # Second invocation must not raise and must replace the rows without
        # leaving duplicates behind.
        aggregate_findings(str(tenant.id), str(scan.id))
        second_run_ids = set(
            ScanSummary.all_objects.filter(
                tenant_id=tenant.id, scan_id=scan.id
            ).values_list("id", flat=True)
        )
        second_run_rows = list(
            ScanSummary.all_objects.filter(tenant_id=tenant.id, scan_id=scan.id).values(
                "check_id",
                "service",
                "severity",
                "region",
                "fail",
                "_pass",
                "muted",
                "total",
            )
        )

        # Aggregated values are identical run-to-run (idempotent output)...
        assert second_run_rows == first_run_rows
        # ...but the rows themselves were deleted and re-inserted, so the
        # second run's primary keys share nothing with the first run's.
        assert first_run_ids.isdisjoint(second_run_ids)
@pytest.mark.django_db
class TestAggregateFindingsByRegion:
+22 -66
View File
@@ -2359,20 +2359,11 @@ class TestReaggregateAllFindingGroupSummaries:
def setup_method(self):
self.tenant_id = str(uuid.uuid4())
@patch("tasks.tasks.chain")
@patch("tasks.tasks.group")
@patch("tasks.tasks.aggregate_finding_group_summaries_task")
@patch("tasks.tasks.aggregate_daily_severity_task")
@patch("tasks.tasks.perform_scan_summary_task")
@patch("tasks.tasks.Scan.objects.filter")
def test_dispatches_subtasks_for_each_provider_per_day(
self,
mock_scan_filter,
mock_scan_summary_task,
mock_daily_severity_task,
mock_finding_group_task,
mock_group,
mock_chain,
self, mock_scan_filter, mock_agg_task, mock_group
):
provider_id_1 = uuid.uuid4()
provider_id_2 = uuid.uuid4()
@@ -2382,13 +2373,8 @@ class TestReaggregateAllFindingGroupSummaries:
today = datetime.now(tz=timezone.utc)
yesterday = today - timedelta(days=1)
mock_outer_group_result = MagicMock()
# The first `group()` call wraps the inner (severity, finding-group)
# parallel step; subsequent calls wrap the outer per-scan generator.
mock_group.side_effect = lambda *args, **kwargs: (
list(args[0]) if args and hasattr(args[0], "__iter__") else None,
mock_outer_group_result,
)[1]
mock_group_result = MagicMock()
mock_group.side_effect = lambda gen: (list(gen), mock_group_result)[1]
mock_scan_filter.return_value.order_by.return_value.values.return_value = [
{
@@ -2411,40 +2397,23 @@ class TestReaggregateAllFindingGroupSummaries:
result = reaggregate_all_finding_group_summaries_task(tenant_id=self.tenant_id)
assert result == {"scans_reaggregated": 3}
expected_scan_ids = {
str(scan_id_today_p1),
str(scan_id_today_p2),
str(scan_id_yesterday_p1),
}
for task_mock in (
mock_scan_summary_task,
mock_daily_severity_task,
mock_finding_group_task,
):
assert task_mock.si.call_count == 3
dispatched = {
call.kwargs["scan_id"] for call in task_mock.si.call_args_list
}
assert dispatched == expected_scan_ids
for call in task_mock.si.call_args_list:
assert call.kwargs["tenant_id"] == self.tenant_id
assert mock_chain.call_count == 3
mock_outer_group_result.apply_async.assert_called_once()
assert mock_agg_task.si.call_count == 3
mock_agg_task.si.assert_any_call(
tenant_id=self.tenant_id, scan_id=str(scan_id_today_p1)
)
mock_agg_task.si.assert_any_call(
tenant_id=self.tenant_id, scan_id=str(scan_id_today_p2)
)
mock_agg_task.si.assert_any_call(
tenant_id=self.tenant_id, scan_id=str(scan_id_yesterday_p1)
)
mock_group_result.apply_async.assert_called_once()
@patch("tasks.tasks.chain")
@patch("tasks.tasks.group")
@patch("tasks.tasks.aggregate_finding_group_summaries_task")
@patch("tasks.tasks.aggregate_daily_severity_task")
@patch("tasks.tasks.perform_scan_summary_task")
@patch("tasks.tasks.Scan.objects.filter")
def test_dedupes_scans_to_latest_per_provider_per_day(
self,
mock_scan_filter,
mock_scan_summary_task,
mock_daily_severity_task,
mock_finding_group_task,
mock_group,
mock_chain,
self, mock_scan_filter, mock_agg_task, mock_group
):
"""When several scans run on the same day for the same provider, only
the latest one is dispatched (matching the daily summary unique key)."""
@@ -2454,11 +2423,8 @@ class TestReaggregateAllFindingGroupSummaries:
today_late = datetime.now(tz=timezone.utc)
today_early = today_late - timedelta(hours=4)
mock_outer_group_result = MagicMock()
mock_group.side_effect = lambda *args, **kwargs: (
list(args[0]) if args and hasattr(args[0], "__iter__") else None,
mock_outer_group_result,
)[1]
mock_group_result = MagicMock()
mock_group.side_effect = lambda gen: (list(gen), mock_group_result)[1]
# Returned ordered by `-completed_at`, so the most recent comes first.
mock_scan_filter.return_value.order_by.return_value.values.return_value = [
@@ -2477,27 +2443,17 @@ class TestReaggregateAllFindingGroupSummaries:
result = reaggregate_all_finding_group_summaries_task(tenant_id=self.tenant_id)
assert result == {"scans_reaggregated": 1}
for task_mock in (
mock_scan_summary_task,
mock_daily_severity_task,
mock_finding_group_task,
):
task_mock.si.assert_called_once_with(
tenant_id=self.tenant_id, scan_id=str(latest_scan_today)
)
mock_chain.assert_called_once()
mock_outer_group_result.apply_async.assert_called_once()
mock_agg_task.si.assert_called_once_with(
tenant_id=self.tenant_id, scan_id=str(latest_scan_today)
)
mock_group_result.apply_async.assert_called_once()
@patch("tasks.tasks.chain")
@patch("tasks.tasks.group")
@patch("tasks.tasks.Scan.objects.filter")
def test_no_completed_scans_skips_dispatch(
self, mock_scan_filter, mock_group, mock_chain
):
def test_no_completed_scans_skips_dispatch(self, mock_scan_filter, mock_group):
mock_scan_filter.return_value.order_by.return_value.values.return_value = []
result = reaggregate_all_finding_group_summaries_task(tenant_id=self.tenant_id)
assert result == {"scans_reaggregated": 0}
mock_group.assert_not_called()
mock_chain.assert_not_called()
+64
View File
@@ -0,0 +1,64 @@
# Prowler Reverse Proxy Configuration
Ready-to-use nginx configuration for running Prowler behind a reverse proxy.
## Problem
Prowler's default Docker setup exposes two separate services:
- **UI** on port 3000
- **API** on port 8080
This causes CORS issues and authentication failures (especially SAML SSO) when accessed through an external reverse proxy, since the proxy typically exposes a single domain.
## Solution
This adds an nginx container that unifies both services behind a single port, correctly forwarding headers so that Django generates proper URLs for SAML ACS callbacks and API responses.
## Quick Start
From the prowler root directory:
docker compose -f docker-compose.yml \
-f contrib/reverse-proxy/docker-compose.reverse-proxy.yml \
up -d
Access Prowler at http://localhost (port 80).
## With an External Reverse Proxy
Point your external reverse proxy to the prowler-nginx container on port 80.
### Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| PROWLER_PROXY_PORT | 80 | Port exposed by the nginx proxy |
### Example: Traefik
services:
nginx:
labels:
- "traefik.enable=true"
- "traefik.http.routers.prowler.rule=Host(`prowler.example.com`)"
- "traefik.http.routers.prowler.tls.certresolver=letsencrypt"
- "traefik.http.services.prowler.loadbalancer.server.port=80"
### Example: Caddy
prowler.example.com {
reverse_proxy prowler-nginx:80
}
## SAML SSO
If using SAML SSO behind a reverse proxy, also set the SAML_ACS_BASE_URL environment variable:
SAML_ACS_BASE_URL=https://prowler.example.com
## Architecture
Internet -> External Reverse Proxy -> prowler-nginx:80
|-- /api/* -> prowler-api:8080
|-- /accounts/saml/ -> prowler-api:8080
+-- /* -> prowler-ui:3000
@@ -0,0 +1,42 @@
# Prowler Reverse Proxy - Docker Compose Override
#
# Use this alongside the main docker-compose.yml to add an nginx
# reverse proxy that unifies UI and API behind a single port.
#
# Usage:
# docker compose -f docker-compose.yml -f contrib/reverse-proxy/docker-compose.reverse-proxy.yml up -d
#
# Then access Prowler at http://localhost (port 80) or configure
# your external reverse proxy (Traefik, Caddy, Cloudflare Tunnel,
# Pangolin, etc.) to point to this container on port 80.
#
# For HTTPS with your own certs, see the README in this directory.
#
# Fixes: https://github.com/prowler-cloud/prowler/issues/8516
services:
  # nginx front door: the only externally exposed container. Routes /api/*
  # and /accounts/saml/ to the API, everything else to the UI (see nginx.conf).
  nginx:
    image: nginx:alpine
    container_name: prowler-nginx
    restart: unless-stopped
    ports:
      # PROWLER_PROXY_PORT controls the host port; container always listens on 80.
      - "${PROWLER_PROXY_PORT:-80}:80"
    volumes:
      # Path is relative to the compose project root, not this directory,
      # because this file is meant to be passed as an extra -f override.
      - ./contrib/reverse-proxy/nginx.conf:/etc/nginx/conf.d/default.conf:ro
    depends_on:
      - prowler-ui
      - prowler-api
    networks:
      - prowler-network

  # Override UI to not expose port externally (nginx handles it).
  # `!reset` clears the base file's ports list (Compose spec override tag —
  # requires a Compose version that supports !reset; confirm against the
  # minimum version documented for this repo).
  prowler-ui:
    ports: !reset []

  # Override API to not expose port externally (nginx handles it)
  prowler-api:
    ports: !reset []

networks:
  prowler-network:
    driver: bridge
+70
View File
@@ -0,0 +1,70 @@
# Prowler Reverse Proxy Configuration
# Routes both UI and API through a single endpoint
#
# Usage: See docker-compose.reverse-proxy.yml
# Fixes: https://github.com/prowler-cloud/prowler/issues/8516
# Upstreams resolve by Docker Compose service name on the shared network.
upstream prowler-ui {
    server prowler-ui:3000;
}

upstream prowler-api {
    server prowler-api:8080;
}

server {
    listen 80;
    # Catch-all server name: this is the only vhost behind the proxy.
    server_name _;

    # Security headers applied to every response.
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header Referrer-Policy "strict-origin-when-cross-origin" always;

    # API requests proxy to prowler-api.
    # X-Forwarded-Proto/-Host let Django build correct absolute URLs
    # (e.g. SAML ACS callbacks) behind the proxy.
    location /api/ {
        proxy_pass http://prowler-api/api/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
        # Long read timeout: scan-related API calls can be slow.
        proxy_read_timeout 300s;
        proxy_connect_timeout 10s;
        # Handle large scan payloads
        client_max_body_size 50m;
    }

    # SAML endpoints proxy to prowler-api (served by Django, not the UI).
    location /accounts/saml/ {
        proxy_pass http://prowler-api/accounts/saml/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
    }

    # Everything else proxy to prowler-ui.
    location / {
        proxy_pass http://prowler-ui/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
        # WebSocket support for Next.js HMR (dev) and live updates.
        # HTTP/1.1 + Upgrade/Connection headers are required for the
        # WebSocket handshake to pass through nginx.
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # Health check endpoint answered by nginx itself (no upstream involved).
    location /health {
        access_log off;
        return 200 "ok\n";
        add_header Content-Type text/plain;
    }
}
@@ -121,8 +121,8 @@ To update the environment file:
Edit the `.env` file and change version values:
```env
PROWLER_UI_VERSION="5.24.2"
PROWLER_API_VERSION="5.24.2"
PROWLER_UI_VERSION="5.24.0"
PROWLER_API_VERSION="5.24.0"
```
<Note>
@@ -7,6 +7,11 @@ Prowler requires AWS credentials to function properly. Authentication is availab
- Static Credentials
- Assumed Role
When using **Assumed Role**, the Prowler UI exposes two credential sources for calling `sts:AssumeRole`. The labels differ between Prowler Cloud and self-hosted Prowler App, but both map to the same underlying credential types:
- **AWS SDK Default** (shown as *"Prowler Cloud will assume your IAM role"* in Prowler Cloud and *"AWS SDK Default"* in self-hosted Prowler App): Prowler uses the credentials already available to the API and worker containers through the [AWS SDK default credential chain](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). This is the default in Prowler Cloud and requires extra configuration in self-hosted Prowler App (see [Configuring AWS SDK Default for Self-Hosted Prowler App](#configuring-aws-sdk-default-for-self-hosted-prowler-app)).
- **Access & Secret Key**: You paste an IAM user's `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and optionally `AWS_SESSION_TOKEN` into the form. Prowler uses those keys to call `sts:AssumeRole`.
## Required Permissions
To ensure full functionality, attach the following AWS managed policies to the designated user or role:
@@ -76,6 +81,68 @@ This method grants permanent access and is the recommended setup for production
---
## Configuring AWS SDK Default for Self-Hosted Prowler App
When self-hosting Prowler App with Docker Compose, the API and worker containers do not have AWS credentials by default. Selecting **AWS SDK Default** without configuring those credentials produces:
```
AWSAssumeRoleError[1012]: AWS assume role error - An error occurred (InvalidClientTokenId) when calling the AssumeRole operation: The security token included in the request is invalid.
```
To fix this, expose an IAM identity with `sts:AssumeRole` permission on the target role to both the `api` and `worker` services.
### Option 1: Environment Variables in `.env`
Add the following keys to the `.env` file used by `docker-compose.yml`:
```bash
AWS_ACCESS_KEY_ID="<your-access-key-id>"
AWS_SECRET_ACCESS_KEY="<your-secret-access-key>"
AWS_SESSION_TOKEN="<optional-session-token>"
AWS_DEFAULT_REGION="us-east-1"
```
The existing `docker-compose.yml` already loads `.env` into the `api`, `worker`, and `worker-beat` services, so `boto3` will pick them up through the default credential chain.
<Warning>
Treat the `.env` file as a secret. Do not commit it to version control, scope the IAM identity to the minimum permissions required (`sts:AssumeRole` on the target `ProwlerScan` role only), prefer short-lived credentials over long-lived access keys, and rotate the keys immediately if you suspect exposure.
</Warning>
Recreate the containers to apply the change. A plain `docker compose restart` will **not** reload values from a modified `.env` file — you must force-recreate:
```bash
docker compose up -d --force-recreate api worker worker-beat
```
### Option 2: IAM Role (Host with Instance Metadata)
If you run Prowler App on an EC2 instance, ECS task, or EKS pod with an attached IAM role that can assume the scan role, no extra configuration is needed — `boto3` resolves credentials through instance or task metadata automatically.
### Trust Policy: Align `IAMPrincipal` With Your Identity
The [Prowler scan role CloudFormation template](https://github.com/prowler-cloud/prowler/blob/master/permissions/templates/cloudformation/prowler-scan-role.yml) restricts the trust policy with:
```
aws:PrincipalArn StringLike arn:aws:iam::<AccountId>:<IAMPrincipal>
```
`IAMPrincipal` defaults to `role/prowler*`, which only allows IAM roles whose name starts with `prowler`. If the identity hosting the API and worker containers is anything else, the `sts:AssumeRole` call fails with `AccessDenied` even when the credentials themselves are valid.
Redeploy (or update) the CloudFormation stack with an `IAMPrincipal` that matches your identity:
| Your identity on the API/worker containers | `IAMPrincipal` value |
| --- | --- |
| IAM user (for example `prowler-app`) | `user/prowler-app` |
| IAM role whose name doesn't start with `prowler` | `role/<your-role-name>` |
`AccountId` must also point to the account where that identity lives — the default is Prowler Cloud's account and only applies when assuming from Prowler Cloud.
<Note>
The same `External ID` entered in the Prowler UI must match the `ExternalId` parameter used when deploying the CloudFormation stack. A mismatch produces `AccessDenied` on `sts:AssumeRole`, not `InvalidClientTokenId`.
</Note>
---
## Credentials
<Tabs>
@@ -46,15 +46,15 @@ Before proceeding, choose the preferred authentication mode:
**Credentials**
* Quick scan as current user
* No extra setup
* Credentials time out
* Quick scan using an IAM user's access keys
* No extra setup in AWS
* Static keys can be rotated or revoked at any time
**Assumed Role**
* Preferred Setup
* Permanent Credentials
* Requires access to create role
* Recommended for production
* With AWS SDK Default as the credential source, no long-lived keys are stored in Prowler (Access & Secret Key still requires pasted keys)
* Requires permission to create an IAM role in the target account
---
@@ -67,18 +67,23 @@ This method grants permanent access and is the recommended setup for production
For detailed instructions on how to create the role, see [Authentication > Assume Role](/user-guide/providers/aws/authentication#assume-role-recommended).
8. Once the role is created, go to the **IAM Console**, click on the "ProwlerScan" role to open its details:
7. Once the role is created, go to the **IAM Console**, click on the "ProwlerScan" role to open its details:
![ProwlerScan role info](/images/providers/prowler-scan-pre-info.png)
9. Copy the **Role ARN**
8. Copy the **Role ARN**
![New Role Info](/images/providers/get-role-arn.png)
10. Paste the ARN into the corresponding field in Prowler Cloud or Prowler App
9. Paste the ARN into the corresponding field in Prowler Cloud or Prowler App
![Input the Role ARN](/images/providers/paste-role-arn-prowler.png)
10. Select the credential source Prowler should use to call `sts:AssumeRole`. The option label differs between deployments but both map to the same `aws-sdk-default` credential type:
- **"Prowler Cloud will assume your IAM role"** (default in Prowler Cloud) / **"AWS SDK Default"** (in self-hosted Prowler App): Prowler uses the credentials available in the API and worker environment through the [AWS SDK default credential chain](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). In self-hosted Prowler App, these containers have no AWS credentials by default — see [Configuring AWS SDK Default for Self-Hosted Prowler App](/user-guide/providers/aws/authentication#configuring-aws-sdk-default-for-self-hosted-prowler-app) before choosing this option, or the connection test will fail with `InvalidClientTokenId`.
- **Access & Secret Key**: Paste an IAM user's `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` (and optional `AWS_SESSION_TOKEN`) into the form. The IAM principal must be allowed to assume the target role and must match the `IAMPrincipal` parameter of the scan role template (default: `role/prowler*`).
11. Click "Next", then "Launch Scan"
![Next button in Prowler Cloud](/images/providers/next-button-prowler-cloud.png)
+3 -3
View File
@@ -911,11 +911,11 @@ wheels = [
[[package]]
name = "python-dotenv"
version = "1.1.1"
version = "1.2.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" }
sdist = { url = "https://files.pythonhosted.org/packages/82/ed/0301aeeac3e5353ef3d94b6ec08bbcabd04a72018415dcb29e588514bba8/python_dotenv-1.2.2.tar.gz", hash = "sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3", size = 50135, upload-time = "2026-03-01T16:00:26.196Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" },
{ url = "https://files.pythonhosted.org/packages/0b/d7/1959b9648791274998a9c3526f6d0ec8fd2233e4d4acce81bbae76b44b2a/python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a", size = 22101, upload-time = "2026-03-01T16:00:25.09Z" },
]
[[package]]
Generated
+3 -3
View File
@@ -4702,14 +4702,14 @@ dev = ["black (==22.6.0)", "flake8", "mypy", "pytest"]
[[package]]
name = "pyasn1"
version = "0.6.3"
version = "0.6.2"
description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pyasn1-0.6.3-py3-none-any.whl", hash = "sha256:a80184d120f0864a52a073acc6fc642847d0be408e7c7252f31390c0f4eadcde"},
{file = "pyasn1-0.6.3.tar.gz", hash = "sha256:697a8ecd6d98891189184ca1fa05d1bb00e2f84b5977c481452050549c8a72cf"},
{file = "pyasn1-0.6.2-py3-none-any.whl", hash = "sha256:1eb26d860996a18e9b6ed05e7aae0e9fc21619fcee6af91cca9bad4fbea224bf"},
{file = "pyasn1-0.6.2.tar.gz", hash = "sha256:9b59a2b25ba7e4f8197db7686c09fb33e658b98339fadb826e9512629017833b"},
]
[[package]]
+6 -2
View File
@@ -7,12 +7,15 @@ All notable changes to the **Prowler SDK** are documented in this file.
### 🐞 Fixed
- CloudTrail resource timeline uses resource name as fallback in `LookupEvents` [(#10828)](https://github.com/prowler-cloud/prowler/pull/10828)
- Exclude `me-south-1` and `me-central-1` from default AWS scans to prevent hangs when the host can't reach those regional endpoints [(#10837)](https://github.com/prowler-cloud/prowler/pull/10837)
---
## [5.24.1] (Prowler v5.24.1)
### 🚀 Added
- `--repo-list-file` CLI flag for GitHub provider to load repositories from a file [(#10501)](https://github.com/prowler-cloud/prowler/pull/10501)
### 🔄 Changed
- `msgraph-sdk` from 1.23.0 to 1.55.0 and `azure-mgmt-resource` from 23.3.0 to 24.0.0, removing `marshmallow` as it is a transitive dev dependency [(#10733)](https://github.com/prowler-cloud/prowler/pull/10733)
@@ -20,8 +23,8 @@ All notable changes to the **Prowler SDK** are documented in this file.
### 🐞 Fixed
- Cloudflare account-scoped API tokens failing connection test in the App with `CloudflareUserTokenRequiredError` [(#10723)](https://github.com/prowler-cloud/prowler/pull/10723)
- Google Workspace Calendar checks false FAIL on unconfigured settings with secure Google defaults [(#10726)](https://github.com/prowler-cloud/prowler/pull/10726)
- `prowler image --registry-list` crashes with `AttributeError` because `ImageProvider.__init__` returns early before registering the global provider [(#10691)](https://github.com/prowler-cloud/prowler/pull/10691)
- Google Workspace Calendar checks false FAIL on unconfigured settings with secure Google defaults [(#10726)](https://github.com/prowler-cloud/prowler/pull/10726)
- Google Workspace Drive checks false FAIL on unconfigured settings with secure Google defaults [(#10727)](https://github.com/prowler-cloud/prowler/pull/10727)
- Cloudflare `validate_credentials` can hang in an infinite pagination loop when the SDK repeats accounts, blocking connection tests [(#10771)](https://github.com/prowler-cloud/prowler/pull/10771)
@@ -51,6 +54,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
### 🐞 Fixed
- `prowler image --registry-list` crashes with `AttributeError` because `ImageProvider.__init__` returns early before registering the global provider [(#10691)](https://github.com/prowler-cloud/prowler/pull/10691)
- Vercel firewall config handling for team-scoped projects and current API response shapes [(#10695)](https://github.com/prowler-cloud/prowler/pull/10695)
---
+1 -1
View File
@@ -38,7 +38,7 @@ class _MutableTimestamp:
timestamp = _MutableTimestamp(datetime.today())
timestamp_utc = _MutableTimestamp(datetime.now(timezone.utc))
prowler_version = "5.24.3"
prowler_version = "5.25.0"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://raw.githubusercontent.com/prowler-cloud/prowler/dc7d2d5aeb92fdf12e8604f42ef6472cd3e8e889/docs/img/prowler-logo-black.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
+1 -3
View File
@@ -6,9 +6,7 @@ aws:
# aws.disallowed_regions --> List of AWS regions to exclude from the scan.
# Also settable via the PROWLER_AWS_DISALLOWED_REGIONS environment variable or
# the --excluded-region CLI flag. Precedence: CLI > env var > config file.
disallowed_regions:
- me-south-1
- me-central-1
# disallowed_regions: []
# If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
# Mutelist:
# Accounts:
+481 -1
View File
@@ -1,9 +1,10 @@
import json
import os
import sys
from enum import Enum
from typing import Optional, Union
from pydantic.v1 import BaseModel, ValidationError, root_validator
from pydantic.v1 import BaseModel, Field, ValidationError, root_validator
from prowler.lib.check.utils import list_compliance_modules
from prowler.lib.logger import logger
@@ -430,3 +431,482 @@ def load_compliance_framework(
sys.exit(1)
else:
return compliance_framework
# ─── Universal Compliance Schema Models (Phase 1-3) ─────────────────────────
class OutputFormats(BaseModel):
    """Flags indicating in which output formats an attribute should be included."""

    # Default to True so attributes appear in every format unless opted out.
    csv: bool = True
    ocsf: bool = True
class AttributeMetadata(BaseModel):
    """Schema descriptor for a single attribute field in a universal compliance framework."""

    # The attribute key as it appears in each requirement's attributes dict.
    key: str
    # Human-readable label; falls back to the key when absent.
    label: Optional[str] = None
    type: str = "str"  # str, int, float, list_str, list_dict, bool
    # When set, attribute values must be one of these (enforced by the
    # framework-level root validator).
    enum: Optional[list] = None
    # required=True makes the key mandatory on every requirement.
    required: bool = False
    enum_display: Optional[dict] = None  # enum_value -> EnumValueDisplay dict
    enum_order: Optional[list] = None  # explicit ordering of enum values
    chart_label: Optional[str] = None  # axis label when used in charts
    # Which output formats include this attribute (CSV/OCSF flags).
    output_formats: OutputFormats = Field(default_factory=OutputFormats)
class SplitByConfig(BaseModel):
    """Column-splitting configuration (e.g. CIS Level 1/Level 2)."""

    # Attribute key whose values produce one column per entry in `values`.
    field: str
    values: list
class ScoringConfig(BaseModel):
    """Weighted scoring configuration (e.g. ThreatScore)."""

    # Attribute keys providing the risk level and the weight per requirement.
    risk_field: str
    weight_field: str
class TableLabels(BaseModel):
    """Custom pass/fail labels for console table rendering."""

    pass_label: str = "PASS"
    fail_label: str = "FAIL"
    provider_header: str = "Provider"
    # Header for the grouping column; None hides/uses a default header.
    group_header: Optional[str] = None
    status_header: str = "Status"
    title: Optional[str] = None
    results_title: Optional[str] = None
    footer_note: Optional[str] = None
class TableConfig(BaseModel):
    """Declarative rendering instructions for the console compliance table."""

    # Attribute key used to group rows in the table.
    group_by: str
    # Optional column split (e.g. CIS Level 1/Level 2).
    split_by: Optional[SplitByConfig] = None
    # Optional weighted scoring (e.g. ThreatScore).
    scoring: Optional[ScoringConfig] = None
    # Optional custom labels; defaults come from TableLabels.
    labels: Optional[TableLabels] = None
class EnumValueDisplay(BaseModel):
    """Per-enum-value visual metadata for PDF rendering.

    Replaces hardcoded DIMENSION_MAPPING, TIPO_ICONS, nivel colors.
    """

    label: Optional[str] = None  # "Trazabilidad"
    abbreviation: Optional[str] = None  # "T"
    color: Optional[str] = None  # "#4286F4"
    icon: Optional[str] = None  # emoji
class ChartConfig(BaseModel):
    """Declarative chart description for PDF reports."""

    # Unique identifier for the chart within the report.
    id: str
    type: str  # vertical_bar | horizontal_bar | radar
    group_by: str  # attribute key to group by
    title: Optional[str] = None
    x_label: Optional[str] = None
    y_label: Optional[str] = None
    # Which metric drives the bar/segment sizes.
    value_source: str = "compliance_percent"
    color_mode: str = "by_value"  # by_value | fixed | by_group
    # Hex color used when color_mode == "fixed".
    fixed_color: Optional[str] = None
class ScoringFormula(BaseModel):
    """Weighted scoring formula (e.g. ThreatScore)."""

    risk_field: str  # "LevelOfRisk"
    weight_field: str  # "Weight"
    risk_boost_factor: float = 0.25  # rfac = 1 + factor * risk_level
class CriticalRequirementsFilter(BaseModel):
    """Filter for critical requirements section in PDF reports."""

    filter_field: str  # "LevelOfRisk"
    # Exactly one of min_value / filter_value is expected, depending on
    # whether the field holds numeric or string risk levels.
    min_value: Optional[int] = None  # 4 (int-based filter)
    filter_value: Optional[str] = None  # "alto" (string-based filter)
    # Only requirements with this status are listed.
    status_filter: str = "FAIL"
    title: Optional[str] = None  # "Critical Failed Requirements"
class ReportFilter(BaseModel):
    """Default report filtering for PDF generation."""

    # Only include failed requirements by default.
    only_failed: bool = True
    # Whether MANUAL-status requirements are included.
    include_manual: bool = False
class I18nLabels(BaseModel):
    """Localized labels for PDF report rendering."""

    report_title: Optional[str] = None
    page_label: str = "Page"
    powered_by: str = "Powered by Prowler"
    framework_label: str = "Framework:"
    version_label: str = "Version:"
    provider_label: str = "Provider:"
    description_label: str = "Description:"
    compliance_score_label: str = "Compliance Score by Sections"
    requirements_index_label: str = "Requirements Index"
    detailed_findings_label: str = "Detailed Findings"
class PDFConfig(BaseModel):
    """Declarative PDF report configuration.

    Drives the API report generator from JSON data instead of hardcoded
    Python config. Colors are hex strings (e.g. '#336699').
    """

    language: str = "en"
    logo_filename: Optional[str] = None
    primary_color: Optional[str] = None
    secondary_color: Optional[str] = None
    bg_color: Optional[str] = None
    sections: Optional[list] = None
    section_short_names: Optional[dict] = None
    # Attribute keys used for grouping requirements into sections/subsections.
    group_by_field: Optional[str] = None
    sub_group_by_field: Optional[str] = None
    section_titles: Optional[dict] = None
    # List of ChartConfig-shaped dicts describing report charts.
    charts: Optional[list] = None
    scoring: Optional[ScoringFormula] = None
    critical_filter: Optional[CriticalRequirementsFilter] = None
    filter: Optional[ReportFilter] = None
    labels: Optional[I18nLabels] = None
class UniversalComplianceRequirement(BaseModel):
    """Universal requirement with flat dict-based attributes."""

    id: str
    description: str
    name: Optional[str] = None
    # Free-form attributes, validated against the framework's
    # attributes_metadata by ComplianceFramework's root validator.
    attributes: dict = Field(default_factory=dict)
    # provider name -> list of check ids covering this requirement.
    checks: dict[str, list[str]] = Field(default_factory=dict)
    # MITRE ATT&CK-style optional fields.
    tactics: Optional[list] = None
    sub_techniques: Optional[list] = None
    platforms: Optional[list] = None
    technique_url: Optional[str] = None
class OutputsConfig(BaseModel):
    """Container for output-related configuration (table, PDF, etc.)."""

    table_config: Optional[TableConfig] = None
    pdf_config: Optional[PDFConfig] = None
class ComplianceFramework(BaseModel):
"""Universal top-level container for any compliance framework.
Provider may be explicit (single-provider JSON) or derived from checks
keys across all requirements.
"""
framework: str
name: str
provider: Optional[str] = None
version: Optional[str] = None
description: str
icon: Optional[str] = None
requirements: list[UniversalComplianceRequirement]
attributes_metadata: Optional[list[AttributeMetadata]] = None
outputs: Optional[OutputsConfig] = None
    @root_validator
    # noqa: F841 - since vulture raises unused variable 'cls'
    def validate_attributes_against_metadata(cls, values):  # noqa: F841
        """Validate every Requirement's attributes dict against attributes_metadata.

        Checks:
        - Required keys (required=True) must be present in each Requirement.
        - Unknown keys (not declared in attributes_metadata) are rejected.
        - Enum-constrained keys must have a value within the declared enum list.
        - Basic type validation (int, float, bool) for non-None values.

        All violations are collected and reported together in a single
        ValueError rather than failing on the first one.
        """
        metadata = values.get("attributes_metadata")
        requirements = values.get("requirements", [])
        # No declared schema means nothing to validate against.
        if not metadata:
            return values

        # Precompute lookup structures once, outside the per-requirement loop.
        required_keys = {m.key for m in metadata if m.required}
        valid_keys = {m.key for m in metadata}
        enum_map = {m.key: m.enum for m in metadata if m.enum}
        type_map = {m.key: m.type for m in metadata}
        # Declared type name -> acceptable Python type(s). "float" accepts
        # int too (a whole-number float written as an int in JSON).
        # NOTE(review): isinstance(True, int) is True in Python, so a bool
        # value passes an "int" (or "float") check — confirm this is intended.
        type_checks = {
            "int": int,
            "float": (int, float),
            "bool": bool,
        }
        errors = []
        for req in requirements:
            attrs = req.attributes
            # Required keys
            for key in required_keys:
                if key not in attrs or attrs[key] is None:
                    errors.append(
                        f"Requirement '{req.id}': missing required attribute '{key}'"
                    )
            # Unknown keys — anything outside the declared schema is a typo or drift
            unknown_keys = set(attrs) - valid_keys
            for key in sorted(unknown_keys):
                errors.append(
                    f"Requirement '{req.id}': unknown attribute '{key}' "
                    f"(not declared in attributes_metadata)"
                )
            # Enum validation
            for key, allowed in enum_map.items():
                if key in attrs and attrs[key] is not None:
                    if attrs[key] not in allowed:
                        errors.append(
                            f"Requirement '{req.id}': attribute '{key}' value "
                            f"'{attrs[key]}' not in {allowed}"
                        )
            # Type validation for non-string types
            for key in attrs:
                # Unknown keys were already reported; None means "unset".
                if key not in valid_keys or attrs[key] is None:
                    continue
                expected_type = type_map.get(key, "str")
                py_type = type_checks.get(expected_type)
                # Types without an entry in type_checks (str, list_*) are
                # not type-checked here.
                if py_type and not isinstance(attrs[key], py_type):
                    errors.append(
                        f"Requirement '{req.id}': attribute '{key}' expected "
                        f"type {expected_type}, got {type(attrs[key]).__name__}"
                    )
        if errors:
            detail = "\n ".join(errors)
            raise ValueError(f"attributes_metadata validation failed:\n {detail}")
        return values
def get_providers(self) -> list:
"""Derive the set of providers this framework supports.
Inspects checks keys across all requirements. Falls back to the
explicit provider field for single-provider frameworks with no
requirement-level checks.
"""
providers = set()
for req in self.requirements:
providers.update(k.lower() for k in req.checks.keys())
if self.provider and not providers:
providers.add(self.provider.lower())
return sorted(providers)
def supports_provider(self, provider: str) -> bool:
"""Return True if this framework has checks for the given provider."""
provider_lower = provider.lower()
for req in self.requirements:
if any(k.lower() == provider_lower for k in req.checks.keys()):
return True
return self.provider is not None and self.provider.lower() == provider_lower
# ─── Legacy-to-Universal Adapter (Phase 2) ──────────────────────────────────
def _infer_attribute_metadata(legacy: Compliance) -> Optional[list[AttributeMetadata]]:
    """Introspect the first requirement's attribute model to build attributes_metadata.

    Uses the pydantic v1 ``__fields__`` machinery of the first requirement's
    first attribute as a representative sample of the whole framework.

    Returns:
        The inferred list of AttributeMetadata, or None when nothing can be
        inferred (no requirements, MITRE frameworks, no attributes, or an
        unexpected model shape). Inference is strictly best-effort.
    """
    try:
        if not legacy.Requirements:
            return None
        first_req = legacy.Requirements[0]
        # MITRE requirements have Tactics at top level, not in Attributes
        if isinstance(first_req, Mitre_Requirement):
            return None
        if not first_req.Attributes:
            return None
        sample_attr = first_req.Attributes[0]
        metadata = []
        for field_name, field_obj in sample_attr.__fields__.items():
            # outer_type_ strips Optional[...] so we inspect the payload type.
            field_type = field_obj.outer_type_
            type_str = "str"
            enum_values = None
            origin = getattr(field_type, "__origin__", None)
            if field_type is int:
                type_str = "int"
            elif field_type is float:
                type_str = "float"
            elif field_type is bool:
                type_str = "bool"
            elif origin is list:
                args = getattr(field_type, "__args__", ())
                if args and args[0] is dict:
                    type_str = "list_dict"
                else:
                    type_str = "list_str"
            elif isinstance(field_type, type) and issubclass(field_type, Enum):
                # Enum fields are stored as strings constrained to the enum values.
                type_str = "str"
                enum_values = [e.value for e in field_type]
            metadata.append(
                AttributeMetadata(
                    key=field_name,
                    type=type_str,
                    enum=enum_values,
                    required=field_obj.required,
                )
            )
        return metadata
    except Exception as error:
        # Best-effort by design: never fail the adaptation, but log instead
        # of swallowing silently so schema drift stays diagnosable.
        logger.debug(
            f"Could not infer attributes_metadata from legacy framework "
            f"'{getattr(legacy, 'Framework', '?')}': "
            f"{error.__class__.__name__} -- {error}"
        )
        return None
def adapt_legacy_to_universal(legacy: Compliance) -> ComplianceFramework:
    """Convert a legacy Compliance object to a ComplianceFramework."""
    provider_key = legacy.Provider.lower()
    converted = []
    for requirement in legacy.Requirements:
        checks_map = (
            {provider_key: list(requirement.Checks)} if requirement.Checks else {}
        )
        if isinstance(requirement, Mitre_Requirement):
            # MITRE: promote Tactics/SubTechniques/Platforms/TechniqueURL to
            # first-class fields and stash the raw attribute dicts untouched.
            converted.append(
                UniversalComplianceRequirement(
                    id=requirement.Id,
                    description=requirement.Description,
                    name=requirement.Name,
                    attributes={
                        "_raw_attributes": [
                            attribute.dict() for attribute in requirement.Attributes
                        ]
                    },
                    checks=checks_map,
                    tactics=requirement.Tactics,
                    sub_techniques=requirement.SubTechniques,
                    platforms=requirement.Platforms,
                    technique_url=requirement.TechniqueURL,
                )
            )
            continue
        # Standard requirement: flatten the first attribute into a plain dict.
        flat_attrs = requirement.Attributes[0].dict() if requirement.Attributes else {}
        converted.append(
            UniversalComplianceRequirement(
                id=requirement.Id,
                description=requirement.Description,
                name=requirement.Name,
                attributes=flat_attrs,
                checks=checks_map,
            )
        )
    return ComplianceFramework(
        framework=legacy.Framework,
        name=legacy.Name,
        provider=legacy.Provider,
        version=legacy.Version,
        description=legacy.Description,
        requirements=converted,
        attributes_metadata=_infer_attribute_metadata(legacy),
    )
def load_compliance_framework_universal(path: str) -> Optional[ComplianceFramework]:
    """Load a compliance JSON as a ComplianceFramework, handling both new and legacy formats.

    Args:
        path: Filesystem path to the compliance JSON file.

    Returns:
        The parsed ComplianceFramework, or None when the file cannot be
        read or parsed. The return annotation is Optional because the
        error path logs and returns None rather than raising.
    """
    try:
        # JSON is UTF-8 per RFC 8259; be explicit so the platform's locale
        # default encoding (e.g. on Windows) cannot corrupt the read.
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        if "attributes_metadata" in data or "requirements" in data:
            # New universal format — parse directly
            return ComplianceFramework(**data)
        else:
            # Legacy format — parse as Compliance, then adapt
            legacy = Compliance(**data)
            return adapt_legacy_to_universal(legacy)
    except Exception as e:
        logger.error(
            f"Failed to load universal compliance framework from {path}: "
            f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}"
        )
        return None
def _load_jsons_from_dir(dir_path: str, provider: str, bulk: dict) -> None:
    """Scan *dir_path* for JSON files and add matching frameworks to *bulk*.

    Args:
        dir_path: Directory to scan (non-recursive).
        provider: Provider name to match, case-insensitive.
        bulk: Mutable mapping of framework_name -> ComplianceFramework.
            Entries already present are never overwritten, so sub-directory
            scans take precedence over later root-level scans.
    """
    for filename in os.listdir(dir_path):
        file_path = os.path.join(dir_path, filename)
        # Only consider non-empty regular .json files.
        if not (
            os.path.isfile(file_path)
            and filename.endswith(".json")
            and os.stat(file_path).st_size > 0
        ):
            continue
        # Strip only the trailing extension: split(".json")[0] would truncate
        # any filename that contains ".json" before the end.
        framework_name = filename.removesuffix(".json")
        if framework_name in bulk:
            continue
        fw = load_compliance_framework_universal(file_path)
        if fw is None:
            continue
        # supports_provider() already matches both the explicit Provider field
        # and requirement-level checks keys (case-insensitive), so a separate
        # explicit-provider branch is unnecessary.
        if fw.supports_provider(provider):
            bulk[framework_name] = fw
def get_bulk_compliance_frameworks_universal(provider: str) -> dict:
    """Bulk load all compliance frameworks relevant to the given provider.

    Scans:
    1. The **top-level** ``prowler/compliance/`` directory for multi-provider
       JSONs (``Checks`` keyed by provider, no ``Provider`` field).
    2. Every **provider sub-directory** (``prowler/compliance/{p}/``) so that
       single-provider JSONs are also picked up.

    A framework is included when its explicit ``Provider`` matches
    (case-insensitive) **or** any requirement has dict-style ``Checks``
    with a key for *provider*.
    """
    frameworks = {}
    try:
        root_dir = None
        visited = set()
        for module in list_compliance_modules():
            base_path = module.module_finder.path
            sub_dir = f"{base_path}/{module.name.split('.')[-1]}"
            if sub_dir in visited or not os.path.isdir(sub_dir):
                continue
            visited.add(sub_dir)
            # The compliance root is the parent of the first valid sub-dir.
            if root_dir is None:
                root_dir = base_path
            _load_jsons_from_dir(sub_dir, provider, frameworks)
        # Also scan top-level compliance/ for provider-agnostic JSONs.
        if root_dir and os.path.isdir(root_dir):
            _load_jsons_from_dir(root_dir, provider, frameworks)
    except Exception as e:
        logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}")
    return frameworks
+1
View File
@@ -280,6 +280,7 @@ class Provider(ABC):
mutelist_path=arguments.mutelist_file,
config_path=arguments.config_file,
repositories=repos,
repo_list_file=getattr(arguments, "repo_list_file", None),
organizations=orgs,
)
elif "googleworkspace" in provider_class_name.lower():
@@ -34,6 +34,14 @@ class GithubBaseException(ProwlerException):
"message": "The provided provider ID does not match with the authenticated user or accessible organizations",
"remediation": "Check the provider ID and ensure it matches the authenticated user or an organization you have access to.",
},
(5007, "GithubRepoListFileNotFoundError"): {
"message": "The repo list file was not found",
"remediation": "Check the file path and ensure it exists.",
},
(5008, "GithubRepoListFileReadError"): {
"message": "Error reading the repo list file",
"remediation": "Check the file permissions and format.",
},
}
def __init__(self, code, file=None, original_exception=None, message=None):
@@ -104,3 +112,21 @@ class GithubInvalidProviderIdError(GithubCredentialsError):
super().__init__(
5006, file=file, original_exception=original_exception, message=message
)
class GithubRepoListFileNotFoundError(GithubBaseException):
"""Exception raised when the repo list file is not found."""
def __init__(self, file=None, original_exception=None, message=None):
super().__init__(
5007, file=file, original_exception=original_exception, message=message
)
class GithubRepoListFileReadError(GithubBaseException):
"""Exception raised when the repo list file cannot be read."""
def __init__(self, file=None, original_exception=None, message=None):
super().__init__(
5008, file=file, original_exception=original_exception, message=message
)
@@ -22,6 +22,8 @@ from prowler.providers.github.exceptions.exceptions import (
GithubInvalidCredentialsError,
GithubInvalidProviderIdError,
GithubInvalidTokenError,
GithubRepoListFileNotFoundError,
GithubRepoListFileReadError,
GithubSetUpIdentityError,
GithubSetUpSessionError,
)
@@ -90,6 +92,8 @@ class GithubProvider(Provider):
_type: str = "github"
_auth_method: str = None
MAX_REPO_LIST_LINES: int = 10_000
MAX_REPO_NAME_LENGTH: int = 500
_session: GithubSession
_identity: GithubIdentityInfo
_audit_config: dict
@@ -113,6 +117,7 @@ class GithubProvider(Provider):
mutelist_path: str = None,
mutelist_content: dict = None,
repositories: list = None,
repo_list_file: str = None,
organizations: list = None,
):
"""
@@ -130,6 +135,7 @@ class GithubProvider(Provider):
mutelist_path (str): Path to the mutelist file.
mutelist_content (dict): Mutelist content.
repositories (list): List of repository names to scan in 'owner/repo-name' format.
repo_list_file (str): Path to a file containing repository names (one per line).
organizations (list): List of organization or user names to scan repositories for.
"""
logger.info("Instantiating GitHub Provider...")
@@ -147,6 +153,10 @@ class GithubProvider(Provider):
else:
self._repositories = list(repositories)
# Load repos from file if provided
if repo_list_file:
self._load_repos_from_file(repo_list_file)
if organizations is None:
self._organizations = []
elif isinstance(organizations, str):
@@ -256,6 +266,46 @@ class GithubProvider(Provider):
"""
return self._organizations
def _load_repos_from_file(self, file_path: str) -> None:
"""Load repository names from a file (one per line)."""
try:
repo_count = 0
before = len(self._repositories)
with open(file_path, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
repo_count += 1
if repo_count > self.MAX_REPO_LIST_LINES:
raise GithubRepoListFileReadError(
file=file_path,
message=f"Repo list file exceeds maximum of {self.MAX_REPO_LIST_LINES} lines.",
)
if len(line) > self.MAX_REPO_NAME_LENGTH:
logger.warning(
f"Skipping repo name exceeding {self.MAX_REPO_NAME_LENGTH} chars at line {repo_count} in {file_path}"
)
continue
self._repositories.append(line)
self._repositories = list(dict.fromkeys(self._repositories))
logger.info(
f"Loaded {len(self._repositories) - before} repositories from {file_path}"
)
except FileNotFoundError:
raise GithubRepoListFileNotFoundError(
file=file_path,
message=f"Repo list file not found: {file_path}",
)
except (GithubRepoListFileReadError, GithubRepoListFileNotFoundError):
raise
except Exception as error:
raise GithubRepoListFileReadError(
file=file_path,
original_exception=error,
message=f"Error reading repo list file: {error}",
)
@staticmethod
def setup_session(
personal_access_token: str = None,
@@ -50,6 +50,12 @@ def init_parser(self):
default=None,
metavar="REPOSITORY",
)
github_scoping_subparser.add_argument(
"--repo-list-file",
dest="repo_list_file",
default=None,
help="Path to a file containing a list of repositories to scan (one per line in 'owner/repo-name' format). Lines starting with # are treated as comments.",
)
github_scoping_subparser.add_argument(
"--organization",
"--organizations",
+1 -1
View File
@@ -95,7 +95,7 @@ maintainers = [{name = "Prowler Engineering", email = "engineering@prowler.com"}
name = "prowler"
readme = "README.md"
requires-python = ">=3.10,<3.13"
version = "5.24.3"
version = "5.25.0"
[project.scripts]
prowler = "prowler.__main__:prowler"
+568 -10
View File
@@ -1,16 +1,28 @@
---
name: prowler-compliance
description: >
Creates and manages Prowler compliance frameworks.
Trigger: When working with compliance frameworks (CIS, NIST, PCI-DSS, SOC2, GDPR, ISO27001, ENS, MITRE ATT&CK).
Creates, syncs, audits and manages Prowler compliance frameworks end-to-end.
Covers the four-layer architecture (SDK models → JSON catalogs → output
formatters → API/UI), upstream sync workflows, cloud-auditor check-mapping
reviews, output formatter creation, and framework-specific attribute models.
Trigger: When working with compliance frameworks (CIS, NIST, PCI-DSS, SOC2,
GDPR, ISO27001, ENS, MITRE ATT&CK, CCC, C5, CSA CCM, KISA ISMS-P,
Prowler ThreatScore, FedRAMP, HIPAA), syncing with upstream catalogs,
auditing check-to-requirement mappings, adding output formatters, or fixing
compliance JSON bugs (duplicate IDs, empty Version, wrong Section, stale
check refs).
license: Apache-2.0
metadata:
author: prowler-cloud
version: "1.1"
version: "1.2"
scope: [root, sdk]
auto_invoke:
- "Creating/updating compliance frameworks"
- "Mapping checks to compliance controls"
- "Syncing compliance framework with upstream catalog"
- "Auditing check-to-requirement mappings as a cloud auditor"
- "Adding a compliance output formatter (per-provider class + table dispatcher)"
- "Fixing compliance JSON bugs (duplicate IDs, empty Section, stale refs)"
allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task
---
@@ -18,10 +30,82 @@ allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task
Use this skill when:
- Creating a new compliance framework for any provider
- **Syncing an existing framework with an upstream source of truth** (CIS, FINOS CCC, CSA CCM, NIST, ENS, etc.)
- Adding requirements to existing frameworks
- Mapping checks to compliance controls
- **Auditing existing check mappings as a cloud auditor** (user asks "are these mappings correct?", "which checks apply to this requirement?", "review the mappings")
- **Adding a new output formatter** (new framework needs a table dispatcher + per-provider classes + CSV models)
- **Fixing JSON bugs**: duplicate IDs, empty Version, wrong Section, stale check refs, inconsistent FamilyName, padded tangential check mappings
- **Registering a framework in the CLI table dispatcher or API export map**
- Investigating why a finding/check isn't showing under the expected compliance framework in the UI
- Understanding compliance framework structures and attributes
## Four-Layer Architecture (Mental Model)
Prowler compliance is a **four-layer system** hanging off one Pydantic model tree. Bugs usually happen where one layer doesn't match another, so know all four before touching anything.
### Layer 1: SDK / Core Models — `prowler/lib/check/`
- **`compliance_models.py`** — Pydantic **v1** model tree (`from pydantic.v1 import`). One `*_Requirement_Attribute` class per framework type + `Generic_Compliance_Requirement_Attribute` as fallback.
- `Compliance_Requirement.Attributes: list[Union[...]]` — **`Generic_Compliance_Requirement_Attribute` MUST be LAST** in the Union or every framework-specific attribute falls through to Generic (Pydantic v1 tries union members in order).
- **`compliance.py`** — runtime linker. `get_check_compliance()` builds the key as `f"{Framework}-{Version}"` **only if `Version` is non-empty**. An empty Version makes the key just `"{Framework}"` — this breaks downstream filters and tests that expect the versioned key.
- `Compliance.get_bulk(provider)` walks `prowler/compliance/{provider}/` and parses every `.json` file. No central index — just directory scan.
### Layer 2: JSON Frameworks — `prowler/compliance/{provider}/`
See "Compliance Framework Location" and "Framework-Specific Attribute Structures" sections below.
### Layer 3: Output Formatters — `prowler/lib/outputs/compliance/{framework}/`
**Every framework directory follows this exact convention** — do not deviate:
```
{framework}/
├── __init__.py
├── {framework}.py # ONLY get_{framework}_table() — NO function docstring
├── {framework}_{provider}.py # One class per provider (e.g., CCC_AWS, CCC_Azure, CCC_GCP)
└── models.py # One Pydantic v2 BaseModel per provider (CSV columns)
```
- **`{framework}.py`** holds the **table dispatcher function** `get_{framework}_table()`. It prints the pass/fail/muted summary table. **Must NOT import `Finding` or `ComplianceOutput`** — doing so creates a circular import with `prowler/lib/outputs/compliance/compliance.py`. Only imports: `colorama`, `tabulate`, `prowler.config.config.orange_color`.
- **`{framework}_{provider}.py`** holds a per-provider class like `CCC_AWS(ComplianceOutput)` with a `transform()` method that walks findings and emits rows. This file IS allowed to import `Finding` because it's not on the dispatcher import chain.
- **`models.py`** holds one Pydantic v2 `BaseModel` per provider. Field names become CSV column headers (**public API** — renaming breaks downstream consumers).
- **Never collapse per-provider files into a unified parameterized class**, even when DRY-tempting. Every framework in Prowler follows the per-provider file pattern and reviewers will reject the refactor. CSV columns differ per provider (`AccountId`/`Region` vs `SubscriptionId`/`Location` vs `ProjectId`/`Location`) — three classes is the convention.
- **No function docstring on `get_{framework}_table()`** — no other framework has one; stay consistent.
- Register in `prowler/lib/outputs/compliance/compliance.py` → `display_compliance_table()` with an `elif compliance_framework.startswith("{framework}_"):` branch. Import the table function at the top of the file.
### Layer 4: API / UI
- **API table dispatcher**: `api/src/backend/tasks/jobs/export.py` → `COMPLIANCE_CLASS_MAP` keyed by provider. Uses `startswith` predicates: `(lambda name: name.startswith("ccc_"), CCC_AWS)`. **Never use exact match** (`name == "ccc_aws"`) — it's inconsistent and breaks versioning.
- **API lazy loader**: `api/src/backend/api/compliance.py``LazyComplianceTemplate` and `LazyChecksMapping` load compliance per provider on first access.
- **UI mapper routing**: `ui/lib/compliance/compliance-mapper.ts` routes framework names → per-framework mapper.
- **UI per-framework mapper**: `ui/lib/compliance/{framework}.tsx` flattens `Requirements` into a 3-level tree (Framework → Category → Control → Requirement) for the accordion view. Groups by `Attributes[0].FamilyName` and `Attributes[0].Section`.
- **UI detail panel**: `ui/components/compliance/compliance-custom-details/{framework}-details.tsx`.
- **UI types**: `ui/types/compliance.ts` — TypeScript mirrors of the attribute metadata.
### The CLI Pipeline (end-to-end)
```
prowler aws --compliance ccc_aws
Compliance.get_bulk("aws") → parses prowler/compliance/aws/*.json
update_checks_metadata_with_compliance() → attaches compliance info to CheckMetadata
execute_checks() → runs checks, produces Finding objects
get_check_compliance(finding, "aws", bulk_checks_metadata)
→ dict "{Framework}-{Version}" → [requirement_ids]
CCC_AWS(findings, compliance).transform() → per-provider class builds CSV rows
batch_write_data_to_file() → writes {output_filename}_ccc_aws.csv
display_compliance_table() → get_ccc_table() → prints stdout summary
```
---
## Compliance Framework Location
Frameworks are JSON files located in: `prowler/compliance/{provider}/{framework_name}_{provider}.json`
@@ -455,14 +539,453 @@ Prowler ThreatScore is a custom security scoring framework developed by Prowler
- **M365:** `cis_4.0_m365.json`, `iso27001_2022_m365.json`
- **NHN:** `iso27001_2022_nhn.json`
## Workflow A: Sync a Framework With an Upstream Catalog
Use when the framework is maintained upstream (CIS Benchmarks, FINOS CCC, CSA CCM, NIST, ENS, etc.) and Prowler needs to catch up.
### Step 1 — Cache the upstream source
Download every upstream file to a local cache so subsequent iterations don't hit the network. For FINOS CCC:
```bash
mkdir -p /tmp/ccc_upstream
catalogs="core/ccc storage/object management/auditlog management/logging ..."
for p in $catalogs; do
safe=$(echo "$p" | tr '/' '_')
gh api "repos/finos/common-cloud-controls/contents/catalogs/$p/controls.yaml" \
-H "Accept: application/vnd.github.raw" > "/tmp/ccc_upstream/${safe}.yaml"
done
```
### Step 2 — Run the generic sync runner against a framework config
The sync tooling is split into three layers so adding a new framework only takes a YAML config (and optionally a new parser module for an unfamiliar upstream format):
```
skills/prowler-compliance/assets/
├── sync_framework.py # generic runner — works for any framework
├── configs/
│ └── ccc.yaml # per-framework config (canonical example)
└── parsers/
├── __init__.py
└── finos_ccc.py # parser module for FINOS CCC YAML
```
**For frameworks that already have a config + parser** (today: FINOS CCC), run:
```bash
python skills/prowler-compliance/assets/sync_framework.py \
skills/prowler-compliance/assets/configs/ccc.yaml
```
The runner loads the config, validates it, dynamically imports the parser declared in `parser.module`, calls `parser.parse_upstream(config) -> list[dict]`, then applies generic post-processing (id uniqueness safety net, `FamilyName` normalization, legacy check-mapping preservation) and writes the provider JSONs.
**To add a new framework sync**:
1. **Write a config file** at `skills/prowler-compliance/assets/configs/{framework}.yaml`. See `configs/ccc.yaml` as the canonical example. Required top-level sections:
- `framework``name`, `display_name`, `version` (**never empty** — empty Version silently breaks `get_check_compliance()` key construction, so the runner refuses to start), `description_template` (accepts `{provider_display}`, `{provider_key}`, `{framework_name}`, `{framework_display}`, `{version}` placeholders).
- `providers` — list of `{key, display}` pairs, one per Prowler provider the framework targets.
- `output.path_template` — supports `{provider}`, `{framework}`, `{version}` placeholders. Examples: `"prowler/compliance/{provider}/ccc_{provider}.json"` for unversioned file names, `"prowler/compliance/{provider}/cis_{version}_{provider}.json"` for versioned ones.
- `upstream.dir` — local cache directory (populate via Step 1).
- `parser.module` — name of the module under `parsers/` to load (without `.py`). Everything else under `parser.` is opaque to the runner and passed to the parser as config.
- `post_processing.check_preservation.primary_key` — top-level field name for the primary legacy-mapping lookup (almost always `Id`).
- `post_processing.check_preservation.fallback_keys`**config-driven fallback keys** for preserving check mappings when ids change. Each entry is a list of `Attributes[0]` field names composed into a tuple. Examples:
- CCC: `- [Section, Applicability]` (because `Applicability` is a CCC-only attribute, verified in `compliance_models.py:213`).
- CIS would use `- [Section, Profile]`.
- NIST would use `- [ItemId]`.
- List-valued fields (like `Applicability`) are automatically frozen to `frozenset` so the tuple is hashable.
- `post_processing.family_name_normalization` (optional) — map of raw → canonical `FamilyName` values. The UI groups by `Attributes[0].FamilyName` exactly, so inconsistent upstream variants otherwise become separate tree branches.
2. **Reuse an existing parser** if the upstream format matches one (currently only `finos_ccc` exists). Otherwise, **write a new parser** at `parsers/{name}.py` implementing:
```python
def parse_upstream(config: dict) -> list[dict]:
"""Return Prowler-format requirements {Id, Description, Attributes: [...], Checks: []}.
Ids MUST be unique in the returned list. The runner raises ValueError
on duplicates — it does NOT silently renumber, because mutating a
canonical upstream id (e.g. CIS '1.1.1' or NIST 'AC-2(1)') would be
catastrophic. The parser owns all upstream-format quirks: foreign-prefix
rewriting, genuine collision renumbering, shape handling.
"""
```
The parser reads its own settings from `config['upstream']` and `config['parser']`. It does NOT load existing Prowler JSONs (the runner does that for check preservation) and does NOT write output (the runner does that too).
**Gotchas the runner already handles for you** (learned from the FINOS CCC v2025.10 sync — they're documented here so you don't re-discover them):
- **Multiple upstream YAML shapes**. Most FINOS CCC catalogs use `control-families: [...]`, but `storage/object` uses a top-level `controls: [...]` with a `family: "CCC.X.Y"` reference id and no human-readable family name. A parser that only handles shape 1 silently drops the shape-2 catalog — this exact bug dropped ObjStor from Prowler for a full iteration. `parsers/finos_ccc.py` handles both shapes; if you write a new parser for a similar format, test with at least one file of each shape.
- **Whitespace collapse**. Upstream YAML multi-line block scalars (`|`) preserve newlines. Prowler stores descriptions single-line. Collapse with `" ".join(value.split())` before emitting (see `parsers/finos_ccc.py::clean()`).
- **Foreign-prefix AR id rewriting**. Upstream sometimes aliases requirements across catalogs by keeping the original prefix (e.g., `CCC.AuditLog.CN08.AR01` appears nested under `CCC.Logging.CN03`). Rewrite the foreign id to fit its parent control: `CCC.Logging.CN03.AR01`. This logic is parser-specific because the id structure varies per framework (CCC uses 3-dot depth; CIS uses numeric dots; NIST uses `AC-2(1)`).
- **Genuine upstream collision renumbering**. Sometimes upstream has a real typo where two different requirements share the same id (e.g., `CCC.Core.CN14.AR02` defined twice for 30-day and 14-day backup variants). Renumber the second copy to the next free AR number (`.AR03`). The parser handles this; the runner asserts the final list has unique ids as a safety net.
- **Existing check mapping preservation**. The runner uses the `primary_key` + `fallback_keys` declared in config to look up the old `Checks` list for each requirement. For CCC this means primary index by `Id` plus fallback index by `(Section, frozenset(Applicability))` — the fallback recovers mappings for requirements whose ids were rewritten or renumbered by the parser.
- **FamilyName normalization**. Configured via `post_processing.family_name_normalization` — no code changes needed to collapse upstream variants like `"Logging & Monitoring"``"Logging and Monitoring"`.
- **Populate `Version`**. The runner refuses to start on empty `framework.version` — fail-fast replaces the silent bug where `get_check_compliance()` would build the key as just `"{Framework}"`.
### Step 3 — Validate before committing
```python
from prowler.lib.check.compliance_models import Compliance
for prov in ['aws', 'azure', 'gcp']:
c = Compliance.parse_file(f"prowler/compliance/{prov}/ccc_{prov}.json")
print(f"{prov}: {len(c.Requirements)} reqs, version={c.Version}")
```
Any `ValidationError` means the Attribute fields don't match the `*_Requirement_Attribute` model. Either fix the JSON or extend the model in `compliance_models.py` (remember: Generic stays last).
### Step 4 — Verify every check id exists
```python
import json
from pathlib import Path
for prov in ['aws', 'azure', 'gcp']:
existing = {p.stem.replace('.metadata','')
for p in Path(f'prowler/providers/{prov}/services').rglob('*.metadata.json')}
with open(f'prowler/compliance/{prov}/ccc_{prov}.json') as f:
data = json.load(f)
refs = {c for r in data['Requirements'] for c in r['Checks']}
missing = refs - existing
assert not missing, f"{prov} missing: {missing}"
```
A stale check id silently becomes dead weight — no finding will ever map to it. This pre-validation **must run on every write**; bake it into the generator script.
### Step 5 — Add an attribute model if needed
Only if the framework has fields beyond `Generic_Compliance_Requirement_Attribute`. Add the class to `prowler/lib/check/compliance_models.py` and register it in `Compliance_Requirement.Attributes: list[Union[...]]`. **Generic stays last.**
---
## Workflow B: Audit Check Mappings as a Cloud Auditor
Use when the user asks to review existing mappings ("are these correct?", "verify that the checks apply", "audit the CCC mappings"). This is the highest-value compliance task — it surfaces padded mappings with zero actual coverage and missing mappings for legitimate coverage.
### The golden rule
> A Prowler check's title/risk MUST **literally describe what the requirement text says**. "Related" is not enough. If no check actually addresses the requirement, leave `Checks: []` (MANUAL) — **honest MANUAL is worth more than padded coverage**.
### Audit process
**Step 1 — Build a per-provider check inventory** (cache in `/tmp/`):
```python
import json
from pathlib import Path
for provider in ['aws', 'azure', 'gcp']:
inv = {}
for meta in Path(f'prowler/providers/{provider}/services').rglob('*.metadata.json'):
with open(meta) as f:
d = json.load(f)
cid = d.get('CheckID') or meta.stem.replace('.metadata','')
inv[cid] = {
'service': d.get('ServiceName', ''),
'title': d.get('CheckTitle', ''),
'risk': d.get('Risk', ''),
'description': d.get('Description', ''),
}
with open(f'/tmp/checks_{provider}.json', 'w') as f:
json.dump(inv, f, indent=2)
```
**Step 2 — Keyword/service query helper** — see [assets/query_checks.py](assets/query_checks.py):
```bash
python assets/query_checks.py aws encryption transit # keyword AND-search
python assets/query_checks.py aws --service iam # all iam checks
python assets/query_checks.py aws --id kms_cmk_rotation_enabled # full metadata
```
**Step 3 — Dump a framework section with current mappings** — see [assets/dump_section.py](assets/dump_section.py):
```bash
python assets/dump_section.py ccc "CCC.Core." # all Core ARs across 3 providers
python assets/dump_section.py ccc "CCC.AuditLog." # all AuditLog ARs
```
**Step 4 — Encode explicit REPLACE decisions** — see [assets/audit_framework_template.py](assets/audit_framework_template.py). Structure:
```python
DECISIONS = {}
DECISIONS["CCC.Core.CN01.AR01"] = {
"aws": [
"cloudfront_distributions_https_enabled",
"cloudfront_distributions_origin_traffic_encrypted",
# ...
],
"azure": [
"storage_secure_transfer_required_is_enabled",
"app_minimum_tls_version_12",
# ...
],
"gcp": [
"cloudsql_instance_ssl_connections",
],
# Missing provider key = leave the legacy mapping untouched
}
# Empty list = EXPLICITLY MANUAL (overwrites legacy)
DECISIONS["CCC.Core.CN01.AR07"] = {
"aws": [], # Prowler has no IANA port/protocol check
"azure": [],
"gcp": [],
}
```
**REPLACE, not PATCH.** Encoding every mapping as a full list (not add/remove delta) makes the audit reproducible and surfaces hidden assumptions from the legacy data.
**Step 5 — Pre-validation**. The audit script MUST validate every check id against the inventory and **abort with stderr listing typos**. Common typos caught during a real audit:
- `fsx_file_system_encryption_at_rest_using_kms` (doesn't exist)
- `cosmosdb_account_encryption_at_rest_with_cmk` (doesn't exist)
- `sqlserver_geo_replication` (doesn't exist)
- `redshift_cluster_audit_logging` (should be `redshift_cluster_encrypted_at_rest`)
- `postgresql_flexible_server_require_secure_transport` (should be `postgresql_flexible_server_enforce_ssl_enabled`)
- `storage_secure_transfer_required_enabled` (should be `storage_secure_transfer_required_is_enabled`)
- `sqlserver_minimum_tls_version_12` (should be `sqlserver_recommended_minimal_tls_version`)
**Step 6 — Apply + validate + test**:
```bash
python /path/to/audit_script.py # applies decisions, pre-validates
python -m pytest tests/lib/outputs/compliance/ tests/lib/check/ -q
```
### Audit Reference Table: Requirement Text → Prowler Checks
Use this table to map CCC-style / NIST-style / ISO-style requirements to the checks that actually verify them. Built from a real audit of 172 CCC ARs × 3 providers.
| Requirement text | AWS checks | Azure checks | GCP checks |
|---|---|---|---|
| **TLS in transit enforced** | `cloudfront_distributions_https_enabled`, `s3_bucket_secure_transport_policy`, `elbv2_ssl_listeners`, `elbv2_insecure_ssl_ciphers`, `elb_ssl_listeners`, `elb_insecure_ssl_ciphers`, `opensearch_service_domains_https_communications_enforced`, `rds_instance_transport_encrypted`, `redshift_cluster_in_transit_encryption_enabled`, `elasticache_redis_cluster_in_transit_encryption_enabled`, `dynamodb_accelerator_cluster_in_transit_encryption_enabled`, `dms_endpoint_ssl_enabled`, `kafka_cluster_in_transit_encryption_enabled`, `transfer_server_in_transit_encryption_enabled`, `glue_database_connections_ssl_enabled`, `sns_subscription_not_using_http_endpoints` | `storage_secure_transfer_required_is_enabled`, `storage_ensure_minimum_tls_version_12`, `postgresql_flexible_server_enforce_ssl_enabled`, `mysql_flexible_server_ssl_connection_enabled`, `mysql_flexible_server_minimum_tls_version_12`, `sqlserver_recommended_minimal_tls_version`, `app_minimum_tls_version_12`, `app_ensure_http_is_redirected_to_https`, `app_ftp_deployment_disabled` | `cloudsql_instance_ssl_connections` (almost the only option) |
| **TLS 1.3 specifically** | Partial: `cloudfront_distributions_using_deprecated_ssl_protocols`, `elb*_insecure_ssl_ciphers`, `*_minimum_tls_version_12` | Partial: `*_minimum_tls_version_12` checks | None — accept as MANUAL |
| **SSH / port 22 hardening** | `ec2_instance_port_ssh_exposed_to_internet`, `ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22`, `ec2_networkacl_allow_ingress_tcp_port_22` | `network_ssh_internet_access_restricted`, `vm_linux_enforce_ssh_authentication` | `compute_firewall_ssh_access_from_the_internet_allowed`, `compute_instance_block_project_wide_ssh_keys_disabled`, `compute_project_os_login_enabled`, `compute_project_os_login_2fa_enabled` |
| **mTLS (mutual TLS)** | `kafka_cluster_mutual_tls_authentication_enabled`, `apigateway_restapi_client_certificate_enabled` | `app_client_certificates_on` | None — MANUAL |
| **Data at rest encrypted** | `s3_bucket_default_encryption`, `s3_bucket_kms_encryption`, `ec2_ebs_default_encryption`, `ec2_ebs_volume_encryption`, `rds_instance_storage_encrypted`, `rds_cluster_storage_encrypted`, `rds_snapshots_encrypted`, `dynamodb_tables_kms_cmk_encryption_enabled`, `redshift_cluster_encrypted_at_rest`, `neptune_cluster_storage_encrypted`, `documentdb_cluster_storage_encrypted`, `opensearch_service_domains_encryption_at_rest_enabled`, `kinesis_stream_encrypted_at_rest`, `firehose_stream_encrypted_at_rest`, `sns_topics_kms_encryption_at_rest_enabled`, `sqs_queues_server_side_encryption_enabled`, `efs_encryption_at_rest_enabled`, `athena_workgroup_encryption`, `glue_data_catalogs_metadata_encryption_enabled`, `backup_vaults_encrypted`, `backup_recovery_point_encrypted`, `cloudtrail_kms_encryption_enabled`, `cloudwatch_log_group_kms_encryption_enabled`, `eks_cluster_kms_cmk_encryption_in_secrets_enabled`, `sagemaker_notebook_instance_encryption_enabled`, `apigateway_restapi_cache_encrypted`, `kafka_cluster_encryption_at_rest_uses_cmk`, `dynamodb_accelerator_cluster_encryption_enabled`, `storagegateway_fileshare_encryption_enabled` | `storage_infrastructure_encryption_is_enabled`, `storage_ensure_encryption_with_customer_managed_keys`, `vm_ensure_attached_disks_encrypted_with_cmk`, `vm_ensure_unattached_disks_encrypted_with_cmk`, `sqlserver_tde_encryption_enabled`, `sqlserver_tde_encrypted_with_cmk`, `databricks_workspace_cmk_encryption_enabled`, `monitor_storage_account_with_activity_logs_cmk_encrypted` | `compute_instance_encryption_with_csek_enabled`, `dataproc_encrypted_with_cmks_disabled`, `bigquery_dataset_cmk_encryption`, `bigquery_table_cmk_encryption` |
| **CMEK required (customer-managed keys)** | `kms_cmk_are_used` | `storage_ensure_encryption_with_customer_managed_keys`, `vm_ensure_attached_disks_encrypted_with_cmk`, `vm_ensure_unattached_disks_encrypted_with_cmk`, `sqlserver_tde_encrypted_with_cmk`, `databricks_workspace_cmk_encryption_enabled` | `bigquery_dataset_cmk_encryption`, `bigquery_table_cmk_encryption`, `dataproc_encrypted_with_cmks_disabled`, `compute_instance_encryption_with_csek_enabled` |
| **Key rotation enabled** | `kms_cmk_rotation_enabled` | `keyvault_key_rotation_enabled`, `storage_key_rotation_90_days` | `kms_key_rotation_enabled` |
| **MFA for UI access** | `iam_root_mfa_enabled`, `iam_root_hardware_mfa_enabled`, `iam_user_mfa_enabled_console_access`, `iam_user_hardware_mfa_enabled`, `iam_administrator_access_with_mfa`, `cognito_user_pool_mfa_enabled` | `entra_privileged_user_has_mfa`, `entra_non_privileged_user_has_mfa`, `entra_user_with_vm_access_has_mfa`, `entra_security_defaults_enabled` | `compute_project_os_login_2fa_enabled` |
| **API access / credentials** | `iam_no_root_access_key`, `iam_user_no_setup_initial_access_key`, `apigateway_restapi_authorizers_enabled`, `apigateway_restapi_public_with_authorizer`, `apigatewayv2_api_authorizers_enabled` | `entra_conditional_access_policy_require_mfa_for_management_api`, `app_function_access_keys_configured`, `app_function_identity_is_configured` | `apikeys_api_restrictions_configured`, `apikeys_key_exists`, `apikeys_key_rotated_in_90_days` |
| **Log all admin/config changes** | `cloudtrail_multi_region_enabled`, `cloudtrail_multi_region_enabled_logging_management_events`, `cloudtrail_cloudwatch_logging_enabled`, `cloudtrail_log_file_validation_enabled`, `cloudwatch_log_metric_filter_*`, `cloudwatch_changes_to_*_alarm_configured`, `config_recorder_all_regions_enabled` | `monitor_diagnostic_settings_exists`, `monitor_diagnostic_setting_with_appropriate_categories`, `monitor_alert_*` | `iam_audit_logs_enabled`, `logging_log_metric_filter_and_alert_for_*`, `logging_sink_created` |
| **Log integrity (digital signatures)** | `cloudtrail_log_file_validation_enabled` (exact) | None | None |
| **Public access denied** | `s3_bucket_public_access`, `s3_bucket_public_list_acl`, `s3_bucket_public_write_acl`, `s3_account_level_public_access_blocks`, `apigateway_restapi_public`, `awslambda_function_url_public`, `awslambda_function_not_publicly_accessible`, `rds_instance_no_public_access`, `rds_snapshots_public_access`, `ec2_securitygroup_allow_ingress_from_internet_to_all_ports`, `sns_topics_not_publicly_accessible`, `sqs_queues_not_publicly_accessible` | `storage_blob_public_access_level_is_disabled`, `storage_ensure_private_endpoints_in_storage_accounts`, `containerregistry_not_publicly_accessible`, `keyvault_private_endpoints`, `app_function_not_publicly_accessible`, `aks_clusters_public_access_disabled`, `network_http_internet_access_restricted` | `cloudstorage_bucket_public_access`, `compute_instance_public_ip`, `cloudsql_instance_public_ip`, `compute_firewall_*_access_from_the_internet_allowed` |
| **IAM least privilege** | `iam_*_no_administrative_privileges`, `iam_policy_allows_privilege_escalation`, `iam_inline_policy_allows_privilege_escalation`, `iam_role_administratoraccess_policy`, `iam_group_administrator_access_policy`, `iam_user_administrator_access_policy`, `iam_policy_attached_only_to_group_or_roles`, `iam_role_cross_service_confused_deputy_prevention` | `iam_role_user_access_admin_restricted`, `iam_subscription_roles_owner_custom_not_created`, `iam_custom_role_has_permissions_to_administer_resource_locks` | `iam_sa_no_administrative_privileges`, `iam_no_service_roles_at_project_level`, `iam_role_kms_enforce_separation_of_duties`, `iam_role_sa_enforce_separation_of_duties` |
| **Password policy** | `iam_password_policy_minimum_length_14`, `iam_password_policy_uppercase`, `iam_password_policy_lowercase`, `iam_password_policy_symbol`, `iam_password_policy_number`, `iam_password_policy_expires_passwords_within_90_days_or_less`, `iam_password_policy_reuse_24` | None | None |
| **Credential rotation / unused** | `iam_rotate_access_key_90_days`, `iam_user_accesskey_unused`, `iam_user_console_access_unused` | None | `iam_sa_user_managed_key_rotate_90_days`, `iam_sa_user_managed_key_unused`, `iam_service_account_unused` |
| **VPC / flow logs** | `vpc_flow_logs_enabled` | `network_flow_log_captured_sent`, `network_watcher_enabled`, `network_flow_log_more_than_90_days` | `compute_subnet_flow_logs_enabled` |
| **Backup / DR / Multi-AZ** | `backup_vaults_exist`, `backup_plans_exist`, `backup_reportplans_exist`, `rds_instance_backup_enabled`, `rds_*_protected_by_backup_plan`, `rds_cluster_multi_az`, `neptune_cluster_backup_enabled`, `documentdb_cluster_backup_enabled`, `efs_have_backup_enabled`, `s3_bucket_cross_region_replication`, `dynamodb_table_protected_by_backup_plan` | `vm_backup_enabled`, `vm_sufficient_daily_backup_retention_period`, `storage_geo_redundant_enabled` | `cloudsql_instance_automated_backups`, `cloudstorage_bucket_log_retention_policy_lock`, `cloudstorage_bucket_sufficient_retention_period` |
| **Access analysis / discovery** | `accessanalyzer_enabled`, `accessanalyzer_enabled_without_findings` | None specific | `iam_account_access_approval_enabled`, `iam_cloud_asset_inventory_enabled` |
| **Object lock / retention** | `s3_bucket_object_lock`, `s3_bucket_object_versioning`, `s3_bucket_lifecycle_enabled`, `cloudtrail_bucket_requires_mfa_delete`, `s3_bucket_no_mfa_delete` | `storage_ensure_soft_delete_is_enabled`, `storage_blob_versioning_is_enabled`, `storage_ensure_file_shares_soft_delete_is_enabled` | `cloudstorage_bucket_log_retention_policy_lock`, `cloudstorage_bucket_soft_delete_enabled`, `cloudstorage_bucket_versioning_enabled`, `cloudstorage_bucket_sufficient_retention_period` |
| **Uniform bucket-level access** | `s3_bucket_acl_prohibited` | `storage_account_key_access_disabled`, `storage_default_to_entra_authorization_enabled` | `cloudstorage_bucket_uniform_bucket_level_access` |
| **Container vulnerability scanning** | `ecr_registry_scan_images_on_push_enabled`, `ecr_repositories_scan_vulnerabilities_in_latest_image` | `defender_container_images_scan_enabled`, `defender_container_images_resolved_vulnerabilities` | `artifacts_container_analysis_enabled`, `gcr_container_scanning_enabled` |
| **WAF / rate limiting** | `wafv2_webacl_with_rules`, `waf_*_webacl_with_rules`, `wafv2_webacl_logging_enabled`, `waf_global_webacl_logging_enabled` | None | None |
| **Deployment region restriction** | `organizations_scp_check_deny_regions` | None | None |
| **Secrets automatic rotation** | `secretsmanager_automatic_rotation_enabled`, `secretsmanager_secret_rotated_periodically` | `keyvault_rbac_secret_expiration_set`, `keyvault_non_rbac_secret_expiration_set` | None |
| **Certificate management** | `acm_certificates_expiration_check`, `acm_certificates_with_secure_key_algorithms`, `acm_certificates_transparency_logs_enabled` | `keyvault_key_expiration_set_in_non_rbac`, `keyvault_rbac_key_expiration_set`, `keyvault_non_rbac_secret_expiration_set` | None |
| **GenAI guardrails / input/output filtering** | `bedrock_guardrail_prompt_attack_filter_enabled`, `bedrock_guardrail_sensitive_information_filter_enabled`, `bedrock_agent_guardrail_enabled`, `bedrock_model_invocation_logging_enabled`, `bedrock_api_key_no_administrative_privileges`, `bedrock_api_key_no_long_term_credentials` | None | None |
| **ML dev environment security** | `sagemaker_notebook_instance_root_access_disabled`, `sagemaker_notebook_instance_without_direct_internet_access_configured`, `sagemaker_notebook_instance_vpc_settings_configured`, `sagemaker_models_vpc_settings_configured`, `sagemaker_training_jobs_vpc_settings_configured`, `sagemaker_training_jobs_network_isolation_enabled`, `sagemaker_training_jobs_volume_and_output_encryption_enabled` | None | None |
| **Threat detection / anomalous behavior** | `cloudtrail_threat_detection_enumeration`, `cloudtrail_threat_detection_privilege_escalation`, `cloudtrail_threat_detection_llm_jacking`, `guardduty_is_enabled`, `guardduty_no_high_severity_findings` | None | None |
| **Serverless private access** | `awslambda_function_inside_vpc`, `awslambda_function_not_publicly_accessible`, `awslambda_function_url_public` | `app_function_not_publicly_accessible` | None |
### What Prowler Does NOT Cover (accept MANUAL honestly)
Don't pad mappings for these — mark `Checks: []` and move on:
- **TLS 1.3 version specifically** — Prowler verifies TLS is enforced, not always the exact version
- **IANA port-protocol consistency** — no check for "protocol running on its assigned port"
- **mTLS on most Azure/GCP services** — limited to App Service client certs on Azure, nothing on GCP
- **Rate limiting** on monitoring endpoints, load balancers, serverless invocations, vector ingestion
- **Session cookie expiry** (LB stickiness)
- **HTTP header scrubbing** (Server, X-Powered-By)
- **Certificate transparency verification for imports**
- **Model version pinning, red teaming, AI quality review**
- **Vector embedding validation, dimensional constraints, ANN vs exact search**
- **Secret region replication** (cross-region residency)
- **Lifecycle cleanup policies on container registries**
- **Row-level / column-level security in data warehouses**
- **Deployment region restriction on Azure/GCP** (AWS has `organizations_scp_check_deny_regions`, others don't)
- **Cross-tenant alert silencing permissions**
- **Field-level masking in logs**
- **Managed view enforcement for database access**
- **Automatic MFA delete on all S3 buckets** (only CloudTrail bucket variant exists for some frameworks — AWS has the generic `s3_bucket_no_mfa_delete` though)
---
## Workflow C: Add a New Output Formatter
Use when a new framework needs its own CSV columns or terminal table. Follow the c5/csa/ens layout exactly:
```bash
mkdir -p prowler/lib/outputs/compliance/{framework}
touch prowler/lib/outputs/compliance/{framework}/__init__.py
```
### Step 1 — Create `{framework}.py` (table dispatcher ONLY)
Copy from `prowler/lib/outputs/compliance/c5/c5.py` and change the function name + framework string. The `diff` between your file and `c5.py` should be just those two lines. **No function docstring** — other frameworks don't have one, stay consistent.
### Step 2 — Create `models.py`
One Pydantic v2 `BaseModel` per provider. Field names become CSV column headers (public API — don't rename later without a migration).
```python
from typing import Optional
from pydantic import BaseModel
class {Framework}_AWSModel(BaseModel):
Provider: str
Description: str
AccountId: str
Region: str
AssessmentDate: str
Requirements_Id: str
Requirements_Description: str
# ... provider-specific columns
Status: str
StatusExtended: str
ResourceId: str
ResourceName: str
CheckId: str
Muted: bool
```
### Step 3 — Create `{framework}_{provider}.py` for each provider
Copy from `prowler/lib/outputs/compliance/c5/c5_aws.py` etc. Contains the `{Framework}_AWS(ComplianceOutput)` class with `transform()` that walks findings and emits model rows. This file IS allowed to import `Finding`.
### Step 4 — Register everywhere
**`prowler/lib/outputs/compliance/compliance.py`** (CLI table dispatcher):
```python
from prowler.lib.outputs.compliance.{framework}.{framework} import get_{framework}_table
def display_compliance_table(...):
...
elif compliance_framework.startswith("{framework}_"):
get_{framework}_table(findings, bulk_checks_metadata,
compliance_framework, output_filename,
output_directory, compliance_overview)
```
**`prowler/__main__.py`** (CLI output writer per provider):
Add imports at the top:
```python
from prowler.lib.outputs.compliance.{framework}.{framework}_aws import {Framework}_AWS
from prowler.lib.outputs.compliance.{framework}.{framework}_azure import {Framework}_Azure
from prowler.lib.outputs.compliance.{framework}.{framework}_gcp import {Framework}_GCP
```
Add provider-specific `elif compliance_name.startswith("{framework}_"):` branches that instantiate the class and call `batch_write_data_to_file()`.
**`api/src/backend/tasks/jobs/export.py`** (API export dispatcher):
```python
from prowler.lib.outputs.compliance.{framework}.{framework}_aws import {Framework}_AWS
# ... azure, gcp
COMPLIANCE_CLASS_MAP = {
"aws": [
# ...
(lambda name: name.startswith("{framework}_"), {Framework}_AWS),
],
# ... azure, gcp
}
```
**Always use `startswith`**, never `name == "framework_aws"`. Exact match is a regression.
### Step 5 — Add tests
Create `tests/lib/outputs/compliance/{framework}/` with `{framework}_aws_test.py`, `{framework}_azure_test.py`, `{framework}_gcp_test.py`. See the test template in [references/test_template.md](references/test_template.md).
Add fixtures to `tests/lib/outputs/compliance/fixtures.py`: one `Compliance` object per provider with 1 evaluated + 1 manual requirement to exercise both code paths in `transform()`.
### Circular import warning
**The table dispatcher file (`{framework}.py`) MUST NOT import `Finding`** (directly or transitively). The cycle is:
```
compliance.compliance imports get_{framework}_table
→ {framework}.py imports ComplianceOutput
→ compliance_output imports Finding
→ finding imports get_check_compliance from compliance.compliance
→ CIRCULAR
```
Keep `{framework}.py` bare — only `colorama`, `tabulate`, `prowler.config.config`. Put anything that imports `Finding` in the per-provider `{framework}_{provider}.py` files.
---
## Conventions and Hard-Won Gotchas
These are lessons from the FINOS CCC v2025.10 sync + 172-AR audit pass (April 2026). Learn them once; save days of debugging.
1. **Per-provider files are non-negotiable.** Never collapse `{framework}_aws.py`, `{framework}_azure.py`, `{framework}_gcp.py` into a single parameterized class, no matter how DRY-tempting. Every other framework in the codebase follows the per-provider pattern and reviewers will reject the refactor. The CSV column names differ per provider — three classes is the convention.
2. **`{framework}.py` has NO function docstring.** Other frameworks don't have them. Don't add one to be "helpful".
3. **Circular import protection**: the table dispatcher file MUST NOT import `Finding` (directly or transitively). Split the code so `{framework}.py` only has `get_{framework}_table()` with bare imports, and `{framework}_{provider}.py` holds the class that needs `Finding`.
4. **`Generic_Compliance_Requirement_Attribute` is the fallback** — in the `Compliance_Requirement.Attributes` Union in `compliance_models.py`, Generic MUST be LAST because Pydantic v1 tries union members in order. Putting Generic first means every framework-specific attribute falls through to Generic and the specific model is never used.
5. **Pydantic v1 imports.** `from pydantic.v1 import BaseModel` in `compliance_models.py` — not v2. Mixing causes validation errors. Pydantic v2 is used in the CSV models (`models.py`) — that's fine because they're separate trees.
6. **`get_check_compliance()` key format** is `f"{Framework}-{Version}"` ONLY if Version is set. Empty Version → key is `"{Framework}"` (no version suffix). Tests that mock compliance dicts must match this exact format — when a framework ships with `Version: ""`, downstream code and tests break silently.
7. **CSV column names from `models.py` are public API.** Don't rename a field without migrating downstream consumers — CSV headers change.
8. **Upstream YAML multi-line scalars** (`|` block scalars) preserve newlines. Collapse to single-line with `" ".join(value.split())` before writing to JSON.
9. **Upstream catalogs can use multiple shapes.** FINOS CCC uses `control-families: [...]` in most catalogs but `controls: [...]` at the top level in `storage/object`. Any sync script must handle both or silently drop entire catalogs.
10. **Foreign-prefix AR ids.** Upstream sometimes "imports" requirements from one catalog into another by keeping the original id prefix (e.g., `CCC.AuditLog.CN08.AR01` appearing under `CCC.Logging.CN03`). Prowler's compliance model requires unique ids within a catalog — rewrite the foreign id to fit the parent control: `CCC.AuditLog.CN08.AR01` (inside `CCC.Logging.CN03`) → `CCC.Logging.CN03.AR01`.
11. **Genuine upstream id collisions.** Sometimes upstream has a real typo where two different requirements share the same id (e.g., `CCC.Core.CN14.AR02` defined twice for 30-day and 14-day backup variants). Renumber the second copy to the next free AR number. Preserve check mappings by matching on `(Section, frozenset(Applicability))` since the renumbered id won't match by id.
12. **`COMPLIANCE_CLASS_MAP` in `export.py` uses `startswith` predicates** for all modern frameworks. Exact match (`name == "ccc_aws"`) is an anti-pattern — it was present for CCC until April 2026 and was the reason CCC couldn't have versioned variants.
13. **Pre-validate every check id** against the per-provider inventory before writing the JSON. A typo silently creates an unreferenced check that will fail when findings try to map to it. The audit script MUST abort with stderr listing typos, not swallow them.
14. **REPLACE is better than PATCH** for audit decisions. Encoding every mapping explicitly makes the audit reproducible and surfaces hidden assumptions from the legacy data. A PATCH system that adds/removes is too easy to forget.
15. **When no check applies, MANUAL is correct.** Do not pad mappings with tangential checks "just in case". Prowler's compliance reports are meant to be actionable — padding them with noise breaks that. Honest manual reqs can be mapped later when new checks land.
16. **UI groups by `Attributes[0].FamilyName` and `Attributes[0].Section`.** If FamilyName has inconsistent variants within the same JSON (e.g., "Logging & Monitoring" vs "Logging and Monitoring"), the UI renders them as separate categories. Section empty → the requirement falls into an orphan control with label "". Normalize before shipping.
17. **Provider coverage is asymmetric.** AWS has dense coverage (~586 checks across 80+ services): in-transit encryption, IAM, database encryption, backup. Azure (~167 checks) and GCP (~102 checks) are thinner especially for in-transit encryption, mTLS, and ML/AI. Accept the asymmetry in mappings — don't force GCP parity where Prowler genuinely can't verify.
---
## Useful One-Liners
```bash
# Count requirements per service prefix (CCC, CIS sections, etc.)
jq -r '.Requirements[].Id | split(".")[1]' prowler/compliance/aws/ccc_aws.json | sort | uniq -c
# Find duplicate requirement IDs
jq -r '.Requirements[].Id' file.json | sort | uniq -d
# Count manual requirements (no checks)
jq '[.Requirements[] | select((.Checks | length) == 0)] | length' file.json
# List all unique check references in a framework
jq -r '.Requirements[].Checks[]' file.json | sort -u
# List all unique Sections (to spot inconsistency)
jq '[.Requirements[].Attributes[0].Section] | unique' file.json
# List all unique FamilyNames (to spot inconsistency)
jq '[.Requirements[].Attributes[0].FamilyName] | unique' file.json
# Diff requirement ids between two versions of the same framework
diff <(jq -r '.Requirements[].Id' a.json | sort) <(jq -r '.Requirements[].Id' b.json | sort)
# Find where a check id is used across all frameworks
grep -rl "my_check_name" prowler/compliance/
# Check if a Prowler check exists
find prowler/providers/aws/services -name "{check_id}.metadata.json"
# Validate a JSON with Pydantic
python -c "from prowler.lib.check.compliance_models import Compliance; print(Compliance.parse_file('prowler/compliance/aws/ccc_aws.json').Framework)"
```
---
## Best Practices
1. **Requirement IDs**: Follow the original framework numbering exactly (e.g., "1.1", "A.5.1", "T1190", "ac_2_1")
2. **Check Mapping**: Map to existing checks when possible. Use `Checks: []` for manual-only requirements — honest MANUAL beats padded coverage
3. **Completeness**: Include all framework requirements, even those without automated checks
4. **Version Control**: Include framework version in `Name` and `Version` fields. **Never leave `Version: ""`** — it breaks `get_check_compliance()` key format
5. **File Naming**: Use format `{framework}_{version}_{provider}.json`
6. **Validation**: Prowler validates JSON against Pydantic models at startup — invalid JSON will cause errors
7. **Pre-validate check ids** against the provider's `*.metadata.json` inventory before every commit
8. **Normalize FamilyName and Section** to avoid inconsistent UI tree branches
9. **Register everywhere**: SDK model (if needed) → `compliance.py` dispatcher → `__main__.py` CLI writer → `export.py` API map → UI mapper. Skipping any layer results in silent failures
10. **Audit, don't pad**: when reviewing mappings, apply the golden rule — the check's title/risk MUST literally describe what the requirement text says. Tangential relation doesn't count
## Commands
@@ -482,11 +1005,46 @@ prowler aws --compliance cis_5.0_aws -M csv json html
## Code References
- **Compliance Models:** `prowler/lib/check/compliance_models.py`
- **Compliance Processing:** `prowler/lib/check/compliance.py`
- **Compliance Output:** `prowler/lib/outputs/compliance/`
### Layer 1 — SDK / Core
- **Compliance Models:** `prowler/lib/check/compliance_models.py` (Pydantic v1 model tree)
- **Compliance Processing / Linker:** `prowler/lib/check/compliance.py` (`get_check_compliance`, `update_checks_metadata_with_compliance`)
- **Check Utils:** `prowler/lib/check/utils.py` (`list_compliance_modules`)
### Layer 2 — JSON Catalogs
- **Framework JSONs:** `prowler/compliance/{provider}/` (auto-discovered via directory walk)
### Layer 3 — Output Formatters
- **Per-framework folders:** `prowler/lib/outputs/compliance/{framework}/`
- **Shared base class:** `prowler/lib/outputs/compliance/compliance_output.py` (`ComplianceOutput` + `batch_write_data_to_file`)
- **CLI table dispatcher:** `prowler/lib/outputs/compliance/compliance.py` (`display_compliance_table`)
- **Finding model:** `prowler/lib/outputs/finding.py` (**do not import transitively from table dispatcher files — circular import**)
- **CLI writer:** `prowler/__main__.py` (per-provider `elif compliance_name.startswith(...)` branches that instantiate per-provider classes)
### Layer 4 — API / UI
- **API lazy loader:** `api/src/backend/api/compliance.py` (`LazyComplianceTemplate`, `LazyChecksMapping`)
- **API export dispatcher:** `api/src/backend/tasks/jobs/export.py` (`COMPLIANCE_CLASS_MAP` with `startswith` predicates)
- **UI framework router:** `ui/lib/compliance/compliance-mapper.ts`
- **UI per-framework mapper:** `ui/lib/compliance/{framework}.tsx`
- **UI detail panel:** `ui/components/compliance/compliance-custom-details/{framework}-details.tsx`
- **UI types:** `ui/types/compliance.ts`
- **UI icon:** `ui/components/icons/compliance/{framework}.svg` + registration in `IconCompliance.tsx`
### Tests
- **Output formatter tests:** `tests/lib/outputs/compliance/{framework}/{framework}_{provider}_test.py`
- **Shared fixtures:** `tests/lib/outputs/compliance/fixtures.py`
## Resources
- **JSON Templates:** See [assets/](assets/) for framework JSON templates (cis, ens, iso27001, mitre_attack, prowler_threatscore, generic)
- **Config-driven compliance sync** (any upstream-backed framework):
- [assets/sync_framework.py](assets/sync_framework.py) — generic runner. Loads a YAML config, dynamically imports the declared parser, applies generic post-processing (id uniqueness safety net, `FamilyName` normalization, legacy check-mapping preservation with config-driven fallback keys), and writes the provider JSONs with Pydantic post-validation. Framework-agnostic — works for any compliance framework.
- [assets/configs/ccc.yaml](assets/configs/ccc.yaml) — canonical config example (FINOS CCC v2025.10). Copy and adapt for new frameworks.
- [assets/parsers/finos_ccc.py](assets/parsers/finos_ccc.py) — FINOS CCC YAML parser. Handles both upstream shapes (`control-families` and top-level `controls`), foreign-prefix AR rewriting, and genuine collision renumbering. Exposes `parse_upstream(config) -> list[dict]`.
- [assets/parsers/](assets/parsers/) — add new parser modules here for unfamiliar upstream formats (NIST OSCAL JSON, MITRE STIX, CIS Benchmarks, etc.). Each parser is a `{name}.py` file implementing `parse_upstream(config) -> list[dict]` with guaranteed-unique ids.
- **Reusable audit tooling** (added April 2026 after the FINOS CCC v2025.10 sync):
- [assets/audit_framework_template.py](assets/audit_framework_template.py) — explicit REPLACE decision ledger with pre-validation against the per-provider inventory. Drop-in template for auditing any framework.
- [assets/query_checks.py](assets/query_checks.py) — keyword/service/id query helper over `/tmp/checks_{provider}.json`.
- [assets/dump_section.py](assets/dump_section.py) — dumps every AR for a given id prefix across all 3 providers with current check mappings.
- [assets/build_inventory.py](assets/build_inventory.py) — generates `/tmp/checks_{provider}.json` from `*.metadata.json` files.
- **Documentation:** See [references/compliance-docs.md](references/compliance-docs.md) for additional resources
- **Related skill:** [prowler-compliance-review](../prowler-compliance-review/SKILL.md) — PR review checklist and validator script for compliance framework PRs
@@ -0,0 +1,207 @@
#!/usr/bin/env python3
"""
Cloud-auditor pass template for any Prowler compliance framework.
Encode explicit REPLACE decisions per (requirement_id, provider) pair below.
Each decision FULLY overwrites the legacy Checks list for that requirement.
Workflow:
1. Run build_inventory.py first to cache per-provider check metadata.
2. Run dump_section.py to see current mappings for the catalog you're auditing.
3. Fill in DECISIONS below with explicit check lists.
4. Run this script — it pre-validates every check id against the inventory
and aborts with stderr listing typos before writing.
Decision rules (apply as a hostile cloud auditor):
- The Prowler check's title/risk MUST literally describe what the AR text says.
"Related" is not enough.
- If no check actually addresses the requirement, leave `[]` (= MANUAL).
HONEST MANUAL is worth more than padded coverage.
- Missing provider key = leave the legacy mapping untouched.
- Empty list `[]` = explicitly MANUAL (overwrites legacy).
Usage:
    # 1. Copy this file to /tmp/audit_<framework>.py and fill in DECISIONS
    # 2. Edit FRAMEWORK_KEY below to match your framework file naming
    # 3. Run:
    python /tmp/audit_<framework>.py
"""
from __future__ import annotations
import json
import sys
from pathlib import Path
# ---------------------------------------------------------------------------
# Configure for your framework
# ---------------------------------------------------------------------------
# Framework file basename inside prowler/compliance/{provider}/.
# If your framework is called "cis_5.0_aws.json", FRAMEWORK_KEY is "cis_5.0".
# If the file is "ccc_aws.json", FRAMEWORK_KEY is "ccc".
FRAMEWORK_KEY = "ccc"
# Which providers to apply decisions to.
PROVIDERS = ["aws", "azure", "gcp"]
# Root directory of the per-provider compliance JSON files.
PROWLER_DIR = Path("prowler/compliance")
# Cached per-provider check inventories produced by build_inventory.py.
CHECK_INV = {prov: Path(f"/tmp/checks_{prov}.json") for prov in PROVIDERS}
# ---------------------------------------------------------------------------
# DECISIONS — encode one entry per requirement you want to audit
# ---------------------------------------------------------------------------
# DECISIONS[requirement_id][provider] = list[str] of check ids
# See SKILL.md → "Audit Reference Table: Requirement Text → Prowler Checks"
# for a comprehensive mapping cheat sheet built from a 172-AR CCC audit.
DECISIONS: dict[str, dict[str, list[str]]] = {}
# ---- Example entries (delete and replace with your own) ----
# Example 1: TLS in transit enforced (non-SSH traffic)
# DECISIONS["CCC.Core.CN01.AR01"] = {
# "aws": [
# "cloudfront_distributions_https_enabled",
# "cloudfront_distributions_origin_traffic_encrypted",
# "s3_bucket_secure_transport_policy",
# "elbv2_ssl_listeners",
# "rds_instance_transport_encrypted",
# "kafka_cluster_in_transit_encryption_enabled",
# "redshift_cluster_in_transit_encryption_enabled",
# "opensearch_service_domains_https_communications_enforced",
# ],
# "azure": [
# "storage_secure_transfer_required_is_enabled",
# "app_minimum_tls_version_12",
# "postgresql_flexible_server_enforce_ssl_enabled",
# "sqlserver_recommended_minimal_tls_version",
# ],
# "gcp": [
# "cloudsql_instance_ssl_connections",
# ],
# }
# Example 2: MANUAL — no Prowler check exists
# DECISIONS["CCC.Core.CN01.AR07"] = {
# "aws": [], # no IANA port/protocol check exists in Prowler
# "azure": [],
# "gcp": [],
# }
# Example 3: Reuse a decision for multiple sibling ARs
# DECISIONS["CCC.ObjStor.CN05.AR02"] = DECISIONS["CCC.ObjStor.CN05.AR01"]
# ---------------------------------------------------------------------------
# Driver — do not edit below
# ---------------------------------------------------------------------------
def load_inventory(provider: str) -> dict:
    """Load the cached /tmp/checks_{provider}.json inventory for one provider.

    Exits with an actionable message when build_inventory.py has not been
    run for this provider yet.
    """
    inv_path = CHECK_INV[provider]
    if inv_path.exists():
        with open(inv_path) as handle:
            return json.load(handle)
    raise SystemExit(
        f"Check inventory missing: {inv_path}\n"
        f"Run: python skills/prowler-compliance/assets/build_inventory.py {provider}"
    )
def resolve_json_path(provider: str) -> Path:
    """Resolve the framework JSON file path for one provider.

    Tries {FRAMEWORK_KEY}_{provider}.json first (e.g. ccc_aws.json), then
    {FRAMEWORK_KEY}.json for keys that already embed the provider suffix.
    """
    candidates = [
        PROWLER_DIR / provider / f"{FRAMEWORK_KEY}_{provider}.json",
        PROWLER_DIR / provider / f"{FRAMEWORK_KEY}.json",
    ]
    found = next((p for p in candidates if p.exists()), None)
    if found is not None:
        return found
    raise SystemExit(
        f"Could not find framework JSON for provider={provider} "
        f"with FRAMEWORK_KEY={FRAMEWORK_KEY}. Tried: {candidates}"
    )
def plan_for_provider(
    provider: str,
) -> tuple[Path, dict, tuple[int, int, int], list[tuple[str, str]]]:
    """Compute the updated framework JSON for one provider without writing.

    Returns (path, mutated_data, (touched, added, removed), unknowns).
    Writing is deferred to a second pass so a typo in any provider aborts
    the whole run before any file on disk changes.
    """
    json_path = resolve_json_path(provider)
    with open(json_path) as handle:
        doc = json.load(handle)
    inventory = load_inventory(provider)
    touched = added = removed = 0
    unknowns: list[tuple[str, str]] = []
    for requirement in doc["Requirements"]:
        req_id = requirement["Id"]
        decision = DECISIONS.get(req_id, {})
        if provider not in decision:
            continue
        # Dedupe while preserving the order the auditor wrote.
        final_checks = list(dict.fromkeys(decision[provider]))
        unknowns.extend(
            (req_id, check) for check in final_checks if check not in inventory
        )
        previous = set(requirement.get("Checks") or [])
        current = set(final_checks)
        removed += len(previous - current)
        added += len(current - previous)
        requirement["Checks"] = final_checks
        touched += 1
    return json_path, doc, (touched, added, removed), unknowns
def main() -> int:
    """Validate every provider's decisions, then (only then) write all files."""
    if not DECISIONS:
        print("No DECISIONS encoded. Fill in the DECISIONS dict and re-run.")
        return 1
    print(f"Applying {len(DECISIONS)} decisions to framework '{FRAMEWORK_KEY}'...")
    # Pass 1: plan and validate every provider before touching disk. A typo
    # in any provider must abort before ANY file has been rewritten.
    plans: list[tuple[str, Path, dict, tuple[int, int, int]]] = []
    all_unknown: list[tuple[str, str, str]] = []
    for provider in PROVIDERS:
        path, data, counts, unknown = plan_for_provider(provider)
        all_unknown.extend((provider, rid, check) for rid, check in unknown)
        plans.append((provider, path, data, counts))
    if all_unknown:
        print("\n!! UNKNOWN CHECK IDS (typos?):", file=sys.stderr)
        for provider, rid, check in all_unknown:
            print(f" {provider} {rid} -> {check}", file=sys.stderr)
        print(
            "\nAborting: fix the check ids above and re-run. "
            "No files were modified.",
            file=sys.stderr,
        )
        return 2
    # Pass 2: everything validated cleanly — safe to write.
    for provider, path, data, (touched, added, removed) in plans:
        with open(path, "w") as handle:
            json.dump(data, handle, indent=2, ensure_ascii=False)
            handle.write("\n")
        print(
            f" {provider}: touched={touched} added={added} removed={removed}"
        )
    return 0


if __name__ == "__main__":
    sys.exit(main())
@@ -0,0 +1,100 @@
#!/usr/bin/env python3
"""
Build a per-provider check inventory by scanning Prowler's check metadata files.
Outputs one JSON per provider at /tmp/checks_{provider}.json with the shape:
{
"check_id": {
"service": "...",
"subservice": "...",
"resource": "...",
"severity": "...",
"title": "...",
"description": "...",
"risk": "..."
},
...
}
This is the reference used by audit_framework_template.py for pre-validation
(every check id in the audit ledger must exist in the inventory) and by
query_checks.py for keyword/service lookup.
Usage:
python skills/prowler-compliance/assets/build_inventory.py
# Or for a specific provider:
python skills/prowler-compliance/assets/build_inventory.py aws
Output:
/tmp/checks_{provider}.json for every provider discovered under
prowler/providers/ with a services/ directory.
"""
from __future__ import annotations
import json
import sys
from pathlib import Path
PROVIDERS_ROOT = Path("prowler/providers")


def discover_providers() -> list[str]:
    """List every provider directory that ships a services/ subdirectory.

    Derived from the filesystem so newly added providers are picked up
    automatically and no hard-coded list can drift from the repo.
    """
    if not PROVIDERS_ROOT.exists():
        return []
    found = [
        entry.name
        for entry in PROVIDERS_ROOT.iterdir()
        if entry.is_dir() and (entry / "services").is_dir()
    ]
    return sorted(found)
def build_for_provider(provider: str) -> dict:
    """Scan one provider's *.metadata.json files into an inventory dict.

    Returns {check_id: {service, subservice, resource, severity, title,
    description, risk}}. Unparseable metadata files are warned about on
    stderr and skipped rather than aborting the whole build.
    """
    inventory: dict[str, dict] = {}
    # Derive the scan root from PROVIDERS_ROOT instead of re-hardcoding the
    # "prowler/providers" path, so it cannot drift from discover_providers().
    base = PROVIDERS_ROOT / provider / "services"
    if not base.exists():
        print(f" skip {provider}: no services directory", file=sys.stderr)
        return inventory
    for meta_path in base.rglob("*.metadata.json"):
        try:
            with open(meta_path) as f:
                data = json.load(f)
        except Exception as exc:
            print(f" warn: cannot parse {meta_path}: {exc}", file=sys.stderr)
            continue
        # Fall back to the filename when CheckID is absent from the metadata.
        cid = data.get("CheckID") or meta_path.stem.replace(".metadata", "")
        inventory[cid] = {
            "service": data.get("ServiceName", ""),
            "subservice": data.get("SubServiceName", ""),
            "resource": data.get("ResourceType", ""),
            "severity": data.get("Severity", ""),
            "title": data.get("CheckTitle", ""),
            "description": data.get("Description", ""),
            "risk": data.get("Risk", ""),
        }
    return inventory
def main() -> int:
    """Build /tmp/checks_{provider}.json for each requested provider."""
    requested = sys.argv[1:] or discover_providers()
    if not requested:
        print(
            f"error: no providers found under {PROVIDERS_ROOT}/",
            file=sys.stderr,
        )
        return 1
    for provider in requested:
        inventory = build_for_provider(provider)
        out_path = Path(f"/tmp/checks_{provider}.json")
        with open(out_path, "w") as handle:
            json.dump(inventory, handle, indent=2)
        print(f" {provider}: {len(inventory)} checks → {out_path}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
@@ -0,0 +1,120 @@
# FINOS Common Cloud Controls (CCC) sync config for sync_framework.py.
#
# Usage:
# python skills/prowler-compliance/assets/sync_framework.py \
# skills/prowler-compliance/assets/configs/ccc.yaml
#
# Prerequisite: run the upstream fetch step from SKILL.md Workflow A Step 1 to
# populate upstream.dir with the raw FINOS catalog YAML files.
framework:
name: CCC
display_name: Common Cloud Controls Catalog (CCC)
version: v2025.10
# The {provider_display} placeholder is replaced at output time with the
# per-provider display string from the providers list below.
description_template: "Common Cloud Controls Catalog (CCC) for {provider_display}"
providers:
- key: aws
display: AWS
- key: azure
display: Azure
- key: gcp
display: GCP
output:
# Supported placeholders: {provider}, {framework}, {version}.
# For versioned frameworks like CIS the template would be
# "prowler/compliance/{provider}/cis_{version}_{provider}.json".
path_template: "prowler/compliance/{provider}/ccc_{provider}.json"
upstream:
# Directory containing the cached FINOS catalog YAMLs. Populate via
# SKILL.md Workflow A Step 1 (gh api raw download commands).
dir: /tmp/ccc_upstream
fetch_docs: "See SKILL.md Workflow A Step 1 for gh api fetch commands"
parser:
# Name of the parser module under parsers/ (loaded dynamically by the
# runner). For FINOS CCC YAML this is always finos_ccc.
module: finos_ccc
# FINOS CCC catalog files in load order. Core first so its ARs render
# first in the output JSON.
catalog_files:
- core_ccc.yaml
- management_auditlog.yaml
- management_logging.yaml
- management_monitoring.yaml
- storage_object.yaml
- networking_loadbalancer.yaml
- networking_vpc.yaml
- crypto_key.yaml
- crypto_secrets.yaml
- database_warehouse.yaml
- database_vector.yaml
- database_relational.yaml
- devtools_build.yaml
- devtools_container-registry.yaml
- identity_iam.yaml
- ai-ml_gen-ai.yaml
- ai-ml_mlde.yaml
- app-integration_message.yaml
- compute_serverless-computing.yaml
# Shape-2 catalogs (storage/object) reference the family via id only
# (e.g. "CCC.ObjStor.Data") with no human-readable title or description
# in the YAML. Map the suffix (after the last dot) to a canonical title
# and description so the generated JSON has consistent FamilyName fields
# regardless of upstream shape.
family_id_title:
Data: Data
IAM: Identity and Access Management
Identity: Identity and Access Management
Encryption: Encryption
Logging: Logging and Monitoring
Network: Network Security
Availability: Availability
Integrity: Integrity
Confidentiality: Confidentiality
family_id_description:
Data: "The Data control family ensures the confidentiality, integrity, availability, and sovereignty of data across its lifecycle."
IAM: "The Identity and Access Management control family ensures that only trusted and authenticated entities can access resources."
post_processing:
# Collapse FamilyName variants that appear inconsistently across upstream
# catalogs. The Prowler UI groups by Attributes[0].FamilyName exactly,
# so each variant would otherwise become a separate tree branch.
family_name_normalization:
"Logging & Monitoring": "Logging and Monitoring"
"Logging and Metrics Publication": "Logging and Monitoring"
# Preserve existing Checks lists from the legacy Prowler JSON when
# regenerating. The runner builds two lookup tables from the legacy
# output: a primary index by Id, and fallback indexes composed of
# attribute field names.
#
# primary_key: the top-level requirement field to use as the primary
# lookup key (almost always "Id")
# fallback_keys: a list of composite keys. Each composite key is a list
# of Attributes[0] field names to join into a tuple. List-valued fields
# (like Applicability) are frozen to frozenset so the tuple is hashable.
#
# CCC uses (Section, Applicability) because Applicability is a CCC-only
# top-level attribute field. CIS would use (Section, Profile). NIST would
# use (ItemId,). The fallback is how renumbered or rewritten ids still
# recover their check mappings.
#
# legacy_path_template (optional): path to read legacy Checks FROM.
# Defaults to output.path_template, which is correct for unversioned
# frameworks (like CCC) where regeneration overwrites the same file.
# For versioned frameworks that write to a new file on each version
# bump (e.g. cis_5.1_aws.json while the legacy mappings live in
# cis_5.0_aws.json), set this to the previous-version path so Checks
# are preserved instead of lost:
# legacy_path_template: "prowler/compliance/{provider}/cis_5.0_{provider}.json"
check_preservation:
primary_key: Id
fallback_keys:
- [Section, Applicability]
@@ -0,0 +1,92 @@
#!/usr/bin/env python3
"""
Dump every requirement of a compliance framework for a given id prefix across
providers, with their current Check mappings.
Useful for reviewing a whole control family in one pass before encoding audit
decisions in audit_framework_template.py.
Usage:
# Dump all CCC.Core requirements across aws/azure/gcp
python skills/prowler-compliance/assets/dump_section.py ccc "CCC.Core."
# Dump all CIS 5.0 section 1 requirements for AWS only
python skills/prowler-compliance/assets/dump_section.py cis_5.0_aws "1."
Arguments:
framework_key: file prefix inside prowler/compliance/{provider}/ without
the provider suffix. Examples:
- "ccc" loads ccc_aws.json / ccc_azure.json / ccc_gcp.json
- "cis_5.0_aws" loads only that one file
- "iso27001_2022" loads all providers
id_prefix: Requirement id prefix to filter by (e.g. "CCC.Core.",
"1.1.", "A.5.").
"""
from __future__ import annotations
import json
import sys
from collections import defaultdict
from pathlib import Path
PROWLER_COMPLIANCE_DIR = Path("prowler/compliance")


def main() -> int:
    """Print every requirement whose Id starts with the given prefix.

    Rows are grouped by requirement id across providers so a whole control
    family can be reviewed in one pass.
    """
    if len(sys.argv) < 3:
        print(__doc__)
        return 1
    framework_key = sys.argv[1]
    id_prefix = sys.argv[2]
    # Find matching JSON files across all providers.
    candidates: list[tuple[str, Path]] = []
    for prov_dir in sorted(PROWLER_COMPLIANCE_DIR.iterdir()):
        if not prov_dir.is_dir():
            continue
        for json_path in prov_dir.glob("*.json"):
            stem = json_path.stem
            # The exact-provider-suffix form ("{key}_{provider}") is already
            # covered by the startswith test, so only two conditions remain.
            if stem == framework_key or stem.startswith(f"{framework_key}_"):
                candidates.append((prov_dir.name, json_path))
    if not candidates:
        print(f"No files matching '{framework_key}'", file=sys.stderr)
        return 2
    discovered_providers = sorted({prov for prov, _ in candidates})
    by_id: dict[str, dict] = defaultdict(dict)
    for prov, path in candidates:
        with open(path) as f:
            data = json.load(f)
        for req in data["Requirements"]:
            if not req["Id"].startswith(id_prefix):
                continue
            attrs = (req.get("Attributes") or [{}])[0]
            by_id[req["Id"]][prov] = {
                "desc": req.get("Description", ""),
                "sec": attrs.get("Section", ""),
                "obj": attrs.get("SubSectionObjective", ""),
                "checks": req.get("Checks") or [],
            }
    for ar_id in sorted(by_id):
        rows = by_id[ar_id]
        # Requirement text is identical across providers; show it once.
        sample = next(iter(rows.values()))
        print(f"\n### {ar_id}")
        print(f" desc: {sample['desc']}")
        if sample["sec"]:
            print(f" sec : {sample['sec']}")
        if sample["obj"]:
            print(f" obj : {sample['obj']}")
        for prov in discovered_providers:
            if prov in rows:
                checks = rows[prov]["checks"]
                print(f" {prov}: ({len(checks)}) {checks}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
@@ -0,0 +1,223 @@
"""
FINOS Common Cloud Controls (CCC) YAML parser.
Reads cached upstream YAML files and emits Prowler-format requirements
(``{Id, Description, Attributes: [...], Checks: []}``). This module is
agnostic to providers, JSON output paths, framework metadata and legacy
check-mapping preservation those are handled by ``sync_framework.py``.
Contract
--------
``parse_upstream(config: dict) -> list[dict]``
Returns a list of Prowler-format requirement dicts with **guaranteed
unique ids**. Foreign-prefix AR rewriting and genuine collision
renumbering both happen inside this module the runner treats id
uniqueness as a contract violation, not as something to fix.
Config keys consumed
--------------------
This parser reads the following config entries (the rest of the config is
opaque to it):
- ``upstream.dir`` directory containing the cached YAMLs
- ``parser.catalog_files`` ordered list of YAML filenames to load
- ``parser.family_id_title`` suffix canonical family title (shape 2)
- ``parser.family_id_description`` suffix family description (shape 2)
Upstream shapes
---------------
FINOS CCC catalogs come in two shapes:
1. ``control-families: [{title, description, controls: [...]}]``
(used by most catalogs)
2. ``controls: [{id, family: "CCC.X.Y", ...}]`` (no families wrapper; used
by ``storage/object``). The ``family`` field references a family id with
no human-readable title in the file the title/description come from
``config.parser.family_id_title`` / ``family_id_description``.
Id rewriting rules
------------------
- **Foreign-prefix rewriting**: upstream intentionally aliases requirements
across catalogs by keeping the original prefix (e.g. ``CCC.AuditLog.CN08.AR01``
appears nested under ``CCC.Logging.CN03``). Prowler requires unique ids
within a catalog file, so we rename the AR to fit its parent control:
``CCC.Logging.CN03.AR01``. See ``rewrite_ar_id()``.
- **Genuine collision renumbering**: sometimes upstream has a real typo
where two distinct requirements share the same id (e.g.
``CCC.Core.CN14.AR02`` appears twice for 30-day and 14-day backup variants).
The second copy is renumbered to the next free AR number within the
control. See the ``seen_ids`` logic in ``emit_requirement()``.
"""
from __future__ import annotations
from pathlib import Path
import yaml
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def clean(value: str | None) -> str:
"""Trim and collapse internal whitespace/newlines into single spaces.
Upstream YAML uses ``|`` block scalars that preserve newlines; Prowler
stores descriptions as single-line text.
"""
if not value:
return ""
return " ".join(value.split())
def flatten_mappings(mappings):
    """Translate upstream ``{reference-id, entries: [...]}`` mappings into
    Prowler's ``{ReferenceId, Identifiers: [...]}`` shape.

    Falsy input (None/empty) yields an empty list.
    """
    result = []
    for mapping in mappings or []:
        identifiers = [
            entry["reference-id"]
            for entry in mapping.get("entries") or []
            if entry.get("reference-id")
        ]
        result.append(
            {
                "ReferenceId": mapping.get("reference-id", ""),
                "Identifiers": identifiers,
            }
        )
    return result
def ar_prefix(ar_id: str) -> str:
    """Parent-control prefix: the first three dot-segments of an AR id.

    e.g. ``CCC.Core.CN01.AR01`` -> ``CCC.Core.CN01``.
    """
    segments = ar_id.split(".")
    return ".".join(segments[:3])
def rewrite_ar_id(parent_control_id: str, original_ar_id: str, ar_index: int) -> str:
    """Rename a foreign-prefix AR id to fit its parent control.

    An AR whose first three dot-segments differ from the parent control id
    (upstream cross-catalog aliasing) is renumbered as
    ``{parent}.AR{index + 1:02d}``; otherwise the id passes through.

    Example
    -------
    parent ``CCC.Logging.CN03`` + AR id ``CCC.AuditLog.CN08.AR01`` with
    index 0 -> ``CCC.Logging.CN03.AR01``.
    """
    own_prefix = ".".join(original_ar_id.split(".")[:3])
    if own_prefix == parent_control_id:
        return original_ar_id
    return f"{parent_control_id}.AR{ar_index + 1:02d}"
def emit_requirement(
    control: dict,
    family_name: str,
    family_desc: str,
    seen_ids: set[str],
    requirements: list[dict],
) -> None:
    """Append Prowler-format requirements for one FINOS control.

    Applies foreign-prefix rewriting and genuine-collision renumbering so
    the ids in ``requirements`` stay unique. Mutates ``seen_ids`` and
    ``requirements`` in place.
    """
    control_id = clean(control.get("id"))
    control_title = clean(control.get("title"))
    section = f"{control_id} {control_title}".strip()
    objective = clean(control.get("objective"))
    threat_mappings = flatten_mappings(control.get("threat-mappings"))
    guideline_mappings = flatten_mappings(control.get("guideline-mappings"))
    for idx, ar in enumerate(control.get("assessment-requirements") or []):
        raw_id = clean(ar.get("id"))
        if not raw_id:
            continue
        new_id = rewrite_ar_id(control_id, raw_id, idx)
        if new_id in seen_ids:
            # Genuine upstream collision: bump to the next free AR number
            # within the same control.
            base = new_id.rsplit(".", 1)[0]
            n = 1
            while f"{base}.AR{n:02d}" in seen_ids:
                n += 1
            new_id = f"{base}.AR{n:02d}"
        seen_ids.add(new_id)
        attributes = {
            "FamilyName": family_name,
            "FamilyDescription": family_desc,
            "Section": section,
            "SubSection": "",
            "SubSectionObjective": objective,
            "Applicability": list(ar.get("applicability") or []),
            "Recommendation": clean(ar.get("recommendation")),
            "SectionThreatMappings": threat_mappings,
            "SectionGuidelineMappings": guideline_mappings,
        }
        requirements.append(
            {
                "Id": new_id,
                "Description": clean(ar.get("text")),
                "Attributes": [attributes],
                "Checks": [],
            }
        )
# ---------------------------------------------------------------------------
# Public entry point
# ---------------------------------------------------------------------------
def parse_upstream(config: dict) -> list[dict]:
    """Walk the cached upstream YAMLs and emit Prowler-format requirements.

    Supports both top-level shapes (``control-families`` wrapper and bare
    ``controls``). Ids in the returned list are guaranteed unique.
    """
    upstream_dir = Path(config["upstream"]["dir"])
    parser_cfg = config.get("parser") or {}
    catalog_files = parser_cfg.get("catalog_files") or []
    family_id_title = parser_cfg.get("family_id_title") or {}
    family_id_description = parser_cfg.get("family_id_description") or {}
    requirements: list[dict] = []
    seen_ids: set[str] = set()
    for filename in catalog_files:
        path = upstream_dir / filename
        if not path.exists():
            # parser.catalog_files is the closed set of upstream catalogs
            # that define the framework. Silently skipping a missing file
            # would emit valid-looking JSON with part of the framework
            # dropped, defeating the whole point of a canonical sync.
            raise FileNotFoundError(
                f"upstream catalog file not found: {path}\n"
                f" hint: refresh the upstream cache (see SKILL.md Workflow A "
                f"Step 1), or remove {filename!r} from parser.catalog_files "
                f"if it has been retired upstream."
            )
        with open(path) as f:
            doc = yaml.safe_load(f) or {}
        # Shape 1: control-families wrapper with human-readable titles.
        for family in doc.get("control-families") or []:
            name = clean(family.get("title"))
            desc = clean(family.get("description"))
            for control in family.get("controls") or []:
                emit_requirement(control, name, desc, seen_ids, requirements)
        # Shape 2: bare controls referencing their family by id only; the
        # title/description come from the config maps.
        for control in doc.get("controls") or []:
            family_ref = clean(control.get("family"))
            suffix = family_ref.split(".")[-1] if family_ref else ""
            name = family_id_title.get(suffix, suffix or "Data")
            desc = family_id_description.get(suffix, "")
            emit_requirement(control, name, desc, seen_ids, requirements)
    return requirements
@@ -0,0 +1,86 @@
#!/usr/bin/env python3
"""
Keyword/service/id lookup over a Prowler check inventory produced by
build_inventory.py.
Usage:
# Keyword AND-search across id + title + risk + description
python skills/prowler-compliance/assets/query_checks.py aws encryption transit
# Show all checks for a service
python skills/prowler-compliance/assets/query_checks.py aws --service iam
# Show full metadata for one check id
python skills/prowler-compliance/assets/query_checks.py aws --id kms_cmk_rotation_enabled
"""
from __future__ import annotations
import json
import sys
def main() -> int:
    """Dispatch to service listing, id lookup, or keyword AND-search."""
    if len(sys.argv) < 3:
        print(__doc__)
        return 1
    provider = sys.argv[1]
    try:
        with open(f"/tmp/checks_{provider}.json") as f:
            inv = json.load(f)
    except FileNotFoundError:
        print(
            f"No inventory for {provider}. Run build_inventory.py first.",
            file=sys.stderr,
        )
        return 2
    mode = sys.argv[2]
    if mode == "--service":
        if len(sys.argv) < 4:
            print("usage: --service <service_name>")
            return 1
        svc = sys.argv[3]
        matching = [cid for cid in sorted(inv) if inv[cid].get("service") == svc]
        for cid in matching:
            print(f" {cid}")
            print(f" {inv[cid].get('title', '')}")
        print(f"\n{len(matching)} checks in service '{svc}'")
    elif mode == "--id":
        if len(sys.argv) < 4:
            print("usage: --id <check_id>")
            return 1
        cid = sys.argv[3]
        if cid not in inv:
            print(f"NOT FOUND: {cid}")
            return 3
        meta = inv[cid]
        print(f"== {cid} ==")
        print(f"service : {meta.get('service')}")
        print(f"severity: {meta.get('severity')}")
        print(f"resource: {meta.get('resource')}")
        print(f"title : {meta.get('title')}")
        print(f"desc : {meta.get('description', '')[:500]}")
        print(f"risk : {meta.get('risk', '')[:500]}")
    else:
        # Keyword AND-search across id + title + risk + description.
        keywords = [k.lower() for k in sys.argv[2:]]
        match_count = 0
        for cid in sorted(inv):
            meta = inv[cid]
            haystack = " ".join(
                [
                    cid,
                    meta.get("title", ""),
                    meta.get("risk", ""),
                    meta.get("description", ""),
                ]
            ).lower()
            if all(k in haystack for k in keywords):
                match_count += 1
                print(f" {cid} [{meta.get('service', '')}]")
                print(f" {meta.get('title', '')[:120]}")
        print(f"\n{match_count} matches for {' + '.join(keywords)}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
@@ -0,0 +1,536 @@
#!/usr/bin/env python3
"""
Generic, config-driven compliance framework sync runner.
Usage:
    python skills/prowler-compliance/assets/sync_framework.py \
        skills/prowler-compliance/assets/configs/ccc.yaml
Pipeline:
1. Load and validate the YAML config (fail fast on missing or empty
required fields — notably ``framework.version``, which silently
breaks ``get_check_compliance()`` key construction if empty).
2. Dynamically import the parser module declared in ``parser.module``
(resolved as ``parsers.{name}`` under this script's directory).
3. Call ``parser.parse_upstream(config) -> list[dict]`` to get raw
Prowler-format requirements. The parser owns all upstream-format
quirks (foreign-prefix AR rewriting, collision renumbering, shape
handling) and MUST return ids that are unique within the returned
list.
4. **Safety net**: assert id uniqueness. The runner raises
``ValueError`` on any duplicate — it does NOT silently renumber,
because mutating a canonical upstream id (e.g. CIS ``1.1.1`` or
NIST ``AC-2(1)``) would be catastrophic.
5. Apply generic ``FamilyName`` normalization from
``post_processing.family_name_normalization`` (optional).
6. Preserve legacy ``Checks`` lists from the existing Prowler JSON
using a config-driven primary key + fallback key chain. CCC uses
``(Section, Applicability)`` as fallback; CIS would use
``(Section, Profile)``; NIST would use ``(ItemId,)``.
For versioned frameworks (e.g. ``cis_<version>_<provider>.json``)
where a version bump writes to a brand-new file, set
``post_processing.check_preservation.legacy_path_template`` to
point at the previous version's file so its Checks are preserved
instead of silently lost. Defaults to ``output.path_template``
when omitted, which is correct for unversioned frameworks.
7. Wrap each provider's requirements in the framework metadata dict
built from the config templates.
8. Write each provider's JSON to the path resolved from
``output.path_template`` (supports ``{framework}``, ``{version}``
and ``{provider}`` placeholders).
9. Pydantic-validate the written JSON via ``Compliance.parse_file()``
and report the load counts per provider.
The runner is strictly generic — it never mentions CCC, knows nothing
about YAML shapes, and can handle any upstream-backed framework given a
parser module and a config file.
"""
from __future__ import annotations
import importlib
import json
import sys
from pathlib import Path
from typing import Any
import yaml
# Make sibling `parsers/` package importable regardless of the runner's
# invocation directory.
_SCRIPT_DIR = Path(__file__).resolve().parent
if str(_SCRIPT_DIR) not in sys.path:
    sys.path.insert(0, str(_SCRIPT_DIR))
# ---------------------------------------------------------------------------
# Config loading and validation
# ---------------------------------------------------------------------------
class ConfigError(ValueError):
    """Raised when the sync config is malformed or missing required fields.

    Subclasses ValueError so callers may catch either exception type.
    """
def _require(cfg: dict, dotted_path: str) -> Any:
"""Fetch a dotted-path key from nested dicts. Raises ConfigError on
missing or empty values (empty-string, empty-list, None)."""
current: Any = cfg
parts = dotted_path.split(".")
for i, part in enumerate(parts):
if not isinstance(current, dict) or part not in current:
raise ConfigError(f"config: missing required field '{dotted_path}'")
current = current[part]
if current in ("", None, [], {}):
raise ConfigError(f"config: field '{dotted_path}' must not be empty")
return current
def load_config(path: Path) -> dict:
    """Load and validate the sync YAML config, failing fast on problems.

    An empty framework.version in particular would silently break
    get_check_compliance() key construction, so all required fields are
    verified up front.
    """
    if not path.exists():
        raise ConfigError(f"config file not found: {path}")
    with open(path) as f:
        cfg = yaml.safe_load(f) or {}
    if not isinstance(cfg, dict):
        raise ConfigError(f"config root must be a mapping, got {type(cfg).__name__}")
    required_fields = (
        "framework.name",
        "framework.display_name",
        "framework.version",
        "framework.description_template",
        "providers",
        "output.path_template",
        "upstream.dir",
        "parser.module",
        "post_processing.check_preservation.primary_key",
    )
    for dotted in required_fields:
        _require(cfg, dotted)
    providers = cfg["providers"]
    if not isinstance(providers, list) or not providers:
        raise ConfigError("config: 'providers' must be a non-empty list")
    for idx, entry in enumerate(providers):
        if not isinstance(entry, dict) or "key" not in entry or "display" not in entry:
            raise ConfigError(
                f"config: providers[{idx}] must have 'key' and 'display' fields"
            )
    return cfg
# ---------------------------------------------------------------------------
# Parser loading
# ---------------------------------------------------------------------------
def load_parser(parser_module_name: str):
    """Import ``parsers.{name}`` from the sibling parsers/ package.

    Wraps ImportError in ConfigError so a bad ``parser.module`` value reads
    as a config problem rather than a bare stack trace.
    """
    module_path = f"parsers.{parser_module_name}"
    try:
        return importlib.import_module(module_path)
    except ImportError as exc:
        raise ConfigError(f"cannot import parser '{module_path}': {exc}") from exc
# ---------------------------------------------------------------------------
# Post-processing: id uniqueness safety net
# ---------------------------------------------------------------------------
def assert_unique_ids(requirements: list[dict]) -> None:
    """Enforce the parser contract: every requirement must carry a unique Id.

    Raises ValueError instead of renumbering — a duplicate is a parser bug,
    and mutating a canonical upstream id would be worse than failing.
    """
    seen: set[str] = set()
    duplicates: set[str] = set()
    for requirement in requirements:
        rid = requirement.get("Id")
        if not rid:
            raise ValueError(f"requirement missing Id: {requirement}")
        if rid in seen:
            duplicates.add(rid)
        seen.add(rid)
    if duplicates:
        raise ValueError(
            f"parser returned duplicate requirement ids: {sorted(duplicates)}"
        )
# ---------------------------------------------------------------------------
# Post-processing: FamilyName normalization
# ---------------------------------------------------------------------------
def normalize_family_names(requirements: list[dict], norm_map: dict[str, str]) -> None:
    """Rewrite ``Attributes[*].FamilyName`` in place according to ``norm_map``.

    A no-op when the map is empty; attribute dicts whose FamilyName is not
    a key of the map are left untouched.
    """
    if not norm_map:
        return
    for requirement in requirements:
        attributes = requirement.get("Attributes") or []
        for attribute in attributes:
            current_name = attribute.get("FamilyName")
            if current_name in norm_map:
                attribute["FamilyName"] = norm_map[current_name]
# ---------------------------------------------------------------------------
# Post-processing: legacy check-mapping preservation
# ---------------------------------------------------------------------------
def _freeze(value: Any) -> Any:
"""Make a value hashable for use in composite lookup keys.
Lists become frozensets (order-insensitive match). Scalars pass through.
"""
if isinstance(value, list):
return frozenset(value)
return value
def _build_fallback_key(attrs: dict, field_names: list[str]) -> tuple | None:
"""Build a composite tuple key from the given attribute field names.
Returns None if any field is missing or falsy that key will be
skipped (the lookup table just won't have an entry for it).
"""
parts = []
for name in field_names:
if name not in attrs:
return None
value = attrs[name]
if value in ("", None, [], {}):
return None
parts.append(_freeze(value))
return tuple(parts)
def load_legacy_check_maps(
    legacy_path: Path,
    primary_key: str,
    fallback_keys: list[list[str]],
) -> tuple[dict[str, list[str]], list[dict[tuple, list[str]]]]:
    """Read the existing Prowler JSON and build lookup tables for check
    preservation.
    Fails fast on ambiguous preservation keys. If two distinct legacy
    requirements share the same primary value or the same fallback tuple,
    merging their ``Checks`` silently would corrupt the preserved mapping
    for unrelated requirements. Raises ``ValueError`` listing every
    conflict so the user can either dedupe the legacy data or strengthen
    ``check_preservation`` in the sync config.
    Returns
    -------
    by_primary : dict
        ``{primary_value: [checks]}`` e.g. ``{ar_id: [checks]}``.
    by_fallback : list[dict]
        One lookup dict per entry in ``fallback_keys``. Each maps a
        composite tuple key to its preserved checks list.
    """
    by_primary: dict[str, list[str]] = {}
    by_fallback: list[dict[tuple, list[str]]] = [{} for _ in fallback_keys]
    # A missing legacy file is not an error: the first sync of a new
    # framework/provider just starts with empty lookup tables.
    if not legacy_path.exists():
        return by_primary, by_fallback
    with open(legacy_path) as f:
        data = json.load(f)
    # Track which legacy requirement Ids contributed to each bucket so we
    # can surface ambiguity after the scan completes.
    primary_sources: dict[str, list[str]] = {}
    fallback_sources: list[dict[tuple, list[str]]] = [{} for _ in fallback_keys]
    for req in data.get("Requirements") or []:
        legacy_id = req.get("Id") or "<missing-Id>"
        checks = req.get("Checks") or []
        pv = req.get(primary_key)
        if pv:
            primary_sources.setdefault(pv, []).append(legacy_id)
            # Merge checks into the bucket preserving first-seen order and
            # dropping duplicates.
            bucket = by_primary.setdefault(pv, [])
            for c in checks:
                if c not in bucket:
                    bucket.append(c)
        # Fallback keys are built from the FIRST attribute dict only;
        # requirements without attributes get no fallback entries.
        attributes = req.get("Attributes") or []
        if not attributes:
            continue
        attrs = attributes[0]
        for i, field_names in enumerate(fallback_keys):
            key = _build_fallback_key(attrs, field_names)
            if key is None:
                # Requirement lacks one of the fields (or it is empty);
                # this fallback table simply gets no entry for it.
                continue
            fallback_sources[i].setdefault(key, []).append(legacy_id)
            bucket = by_fallback[i].setdefault(key, [])
            for c in checks:
                if c not in bucket:
                    bucket.append(c)
    # Report ambiguity only after the full scan so EVERY conflict is
    # listed at once, not just the first one hit.
    conflicts: list[str] = []
    for pv, ids in primary_sources.items():
        if len(ids) > 1:
            conflicts.append(
                f"primary_key={primary_key!r} value={pv!r} shared by {ids}"
            )
    for i, field_names in enumerate(fallback_keys):
        for key, ids in fallback_sources[i].items():
            if len(ids) > 1:
                conflicts.append(
                    f"fallback_key={field_names} value={key!r} shared by {ids}"
                )
    if conflicts:
        details = "\n - ".join(conflicts)
        raise ValueError(
            f"ambiguous preservation keys in {legacy_path} — cannot "
            f"faithfully preserve Checks across distinct requirements:\n"
            f" - {details}\n"
            f"Fix: dedupe the legacy JSON, or strengthen "
            f"'post_processing.check_preservation' in the sync config "
            f"(e.g. add a more discriminating field to fallback_keys)."
        )
    return by_primary, by_fallback
def lookup_preserved_checks(
    req: dict,
    by_primary: dict,
    by_fallback: list[dict],
    primary_key: str,
    fallback_keys: list[list[str]],
) -> list[str]:
    """Resolve the preserved check ids for a single requirement.

    The primary key wins when it matches; otherwise each fallback key is
    tried in declaration order against the requirement's first attribute
    dict. Returns a fresh list (never an alias into the lookup tables),
    or an empty list when nothing matches.
    """
    primary_value = req.get(primary_key)
    if primary_value and primary_value in by_primary:
        return list(by_primary[primary_value])

    attributes = req.get("Attributes") or []
    if not attributes:
        return []

    first_attrs = attributes[0]
    for index, field_names in enumerate(fallback_keys):
        composite = _build_fallback_key(first_attrs, field_names)
        if composite and composite in by_fallback[index]:
            return list(by_fallback[index][composite])
    return []
# ---------------------------------------------------------------------------
# Provider output assembly
# ---------------------------------------------------------------------------
def resolve_output_path(template: str, framework: dict, provider_key: str) -> Path:
    """Expand the output path template for one provider.

    Available placeholders: ``{provider}``, ``{framework}`` (the
    lowercased framework name) and ``{version}``.
    """
    resolved = template.format(
        provider=provider_key,
        framework=framework["name"].lower(),
        version=framework["version"],
    )
    return Path(resolved)
def build_provider_json(
    config: dict,
    provider: dict,
    base_requirements: list[dict],
) -> tuple[dict, dict[str, int]]:
    """Produce the provider-specific JSON dict ready to dump.

    Returns ``(json_dict, counts)`` where ``counts`` records how each
    requirement's checks were resolved: via the primary key, via a
    fallback key, or not at all ("none").
    """
    framework = config["framework"]
    preservation = config["post_processing"]["check_preservation"]
    primary_key = preservation["primary_key"]
    fallback_keys = preservation.get("fallback_keys") or []

    # For versioned frameworks, the file we WRITE (output.path_template
    # resolved at the new version) is not the file we want to READ legacy
    # Checks from. The config may point legacy_path_template at the
    # previous file so a version bump still preserves mappings.
    legacy_template = (
        preservation.get("legacy_path_template")
        or config["output"]["path_template"]
    )
    legacy_path = resolve_output_path(legacy_template, framework, provider["key"])
    by_primary, by_fallback = load_legacy_check_maps(
        legacy_path, primary_key, fallback_keys
    )

    counts = {"primary": 0, "fallback": 0, "none": 0}
    enriched: list[dict] = []
    for requirement in base_requirements:
        checks: list[str] = []
        source = "none"
        # Primary key wins; fallbacks are tried in order against the
        # first attribute dict only.
        primary_value = requirement.get(primary_key)
        if primary_value and primary_value in by_primary:
            checks = list(by_primary[primary_value])
            source = "primary"
        else:
            attributes = requirement.get("Attributes") or []
            if attributes:
                first_attrs = attributes[0]
                for index, field_names in enumerate(fallback_keys):
                    composite = _build_fallback_key(first_attrs, field_names)
                    if composite and composite in by_fallback[index]:
                        checks = list(by_fallback[index][composite])
                        source = "fallback"
                        break
        counts[source] += 1
        enriched.append(
            {
                "Id": requirement["Id"],
                "Description": requirement["Description"],
                # Shallow-copy attribute dicts so providers don't share refs
                "Attributes": [dict(attr) for attr in requirement.get("Attributes") or []],
                "Checks": checks,
            }
        )

    description = framework["description_template"].format(
        provider_display=provider["display"],
        provider_key=provider["key"],
        framework_name=framework["name"],
        framework_display=framework["display_name"],
        version=framework["version"],
    )
    provider_json = {
        "Framework": framework["name"],
        "Version": framework["version"],
        "Provider": provider["display"],
        "Name": framework["display_name"],
        "Description": description,
        "Requirements": enriched,
    }
    return provider_json, counts
# ---------------------------------------------------------------------------
# Pydantic post-validation
# ---------------------------------------------------------------------------
def pydantic_validate(json_path: Path) -> int:
    """Validate a generated compliance file against Prowler's Pydantic model.

    Prowler is imported lazily so the runner still works without it
    installed; in that case validation is skipped and -1 is returned.
    Raises ``RuntimeError`` when the file fails model validation.
    """
    try:
        from prowler.lib.check.compliance_models import Compliance
    except ImportError:
        note = " note: prowler package not importable — skipping Pydantic validation"
        print(note, file=sys.stderr)
        return -1

    try:
        parsed = Compliance.parse_file(str(json_path))
    except Exception as exc:
        raise RuntimeError(
            f"Pydantic validation failed for {json_path}: {exc}"
        ) from exc
    return len(parsed.Requirements)
# ---------------------------------------------------------------------------
# Driver
# ---------------------------------------------------------------------------
def main() -> int:
    """CLI driver: load config, parse upstream, post-process, write JSON.

    Returns a distinct exit code per failure class so CI can tell them
    apart: 1 usage, 2 config, 3 missing upstream cache, 4 parser import,
    5 parser contract, 6 missing upstream file, 0 success.
    """
    if len(sys.argv) != 2:
        print("usage: sync_framework.py <config.yaml>", file=sys.stderr)
        return 1

    try:
        config = load_config(Path(sys.argv[1]))
    except ConfigError as exc:
        print(f"config error: {exc}", file=sys.stderr)
        return 2

    framework_name = config["framework"]["name"]
    upstream_dir = Path(config["upstream"]["dir"])
    if not upstream_dir.exists():
        hint = config["upstream"].get("fetch_docs", "(see SKILL.md Workflow A Step 1)")
        print(
            f"error: upstream cache dir {upstream_dir} not found\n hint: {hint}",
            file=sys.stderr,
        )
        return 3

    parser_module_name = config["parser"]["module"]
    print(
        f"Sync: framework={framework_name} version={config['framework']['version']} "
        f"parser={parser_module_name}"
    )

    try:
        parser = load_parser(parser_module_name)
    except ConfigError as exc:
        print(f"parser error: {exc}", file=sys.stderr)
        return 4

    print(f"Parsing upstream from {upstream_dir}...")
    try:
        base_requirements = parser.parse_upstream(config)
    except FileNotFoundError as exc:
        # A missing catalog declared in parser.catalog_files is a hard
        # failure: emitting JSON with part of the framework silently
        # dropped would violate the canonical-sync contract.
        print(f"upstream error: {exc}", file=sys.stderr)
        return 6
    print(f" parser returned {len(base_requirements)} requirements")

    # Safety net: the parser contract requires globally unique Ids.
    try:
        assert_unique_ids(base_requirements)
    except ValueError as exc:
        print(f"parser contract violation: {exc}", file=sys.stderr)
        return 5

    # Optional post-processing: FamilyName normalization.
    post_processing = config.get("post_processing", {})
    normalize_family_names(
        base_requirements,
        post_processing.get("family_name_normalization") or {},
    )

    # Assemble and write one JSON file per provider.
    print()
    for provider in config["providers"]:
        provider_json, counts = build_provider_json(config, provider, base_requirements)
        out_path = resolve_output_path(
            config["output"]["path_template"],
            config["framework"],
            provider["key"],
        )
        out_path.parent.mkdir(parents=True, exist_ok=True)
        with open(out_path, "w") as f:
            json.dump(provider_json, f, indent=2, ensure_ascii=False)
            f.write("\n")
        validated = pydantic_validate(out_path)
        if validated >= 0:
            validated_msg = f" pydantic_reqs={validated}"
        else:
            validated_msg = " pydantic=skipped"
        print(
            f" {provider['key']}: total={len(provider_json['Requirements'])} "
            f"matched_primary={counts['primary']} "
            f"matched_fallback={counts['fallback']} "
            f"new_or_unmatched={counts['none']}{validated_msg}"
        )
        print(f" wrote {out_path}")

    print("\nDone.")
    return 0
if __name__ == "__main__":
    # Run the driver and propagate its exit code to the shell.
    raise SystemExit(main())
+50 -12
View File
@@ -1,10 +1,10 @@
#!/bin/bash
# Setup AI Skills for Prowler development
# Configures AI coding assistants that follow agentskills.io standard:
# - Claude Code: .claude/skills/ symlink + CLAUDE.md copies
# - Gemini CLI: .gemini/skills/ symlink + GEMINI.md copies
# - Claude Code: .claude/skills/ symlink + CLAUDE.md symlink
# - Gemini CLI: .gemini/skills/ symlink + GEMINI.md symlink
# - Codex (OpenAI): .codex/skills/ symlink + AGENTS.md (native)
# - GitHub Copilot: .github/copilot-instructions.md copy
# - GitHub Copilot: .github/copilot-instructions.md symlink
#
# Usage:
# ./setup.sh # Interactive mode (select AI assistants)
@@ -37,6 +37,28 @@ SETUP_COPILOT=false
# HELPER FUNCTIONS
# =============================================================================
add_to_gitignore() {
local pattern="$1"
local gitignore_file="$REPO_ROOT/.gitignore"
local header="# AI Coding assistants assets"
# Create .gitignore if it doesn't exist
if [ ! -f "$gitignore_file" ]; then
touch "$gitignore_file"
fi
# Check if pattern exists (exact match or at end of file)
if ! grep -qxF "$pattern" "$gitignore_file"; then
# Check if header exists
if ! grep -qxF "$header" "$gitignore_file"; then
echo -e "\n\n$header" >> "$gitignore_file"
fi
echo "$pattern" >> "$gitignore_file"
echo -e "${GREEN} ✓ Added $pattern to .gitignore${NC}"
fi
}
show_help() {
echo "Usage: $0 [OPTIONS]"
echo ""
@@ -109,6 +131,7 @@ setup_claude() {
if [ ! -d "$REPO_ROOT/.claude" ]; then
mkdir -p "$REPO_ROOT/.claude"
fi
add_to_gitignore ".claude/skills"
if [ -L "$target" ]; then
rm "$target"
@@ -119,8 +142,9 @@ setup_claude() {
ln -s "$SKILLS_SOURCE" "$target"
echo -e "${GREEN} ✓ .claude/skills -> skills/${NC}"
# Copy AGENTS.md to CLAUDE.md
copy_agents_md "CLAUDE.md"
# Link AGENTS.md to CLAUDE.md
link_agents_md "CLAUDE.md"
add_to_gitignore "CLAUDE.md"
}
setup_gemini() {
@@ -129,6 +153,7 @@ setup_gemini() {
if [ ! -d "$REPO_ROOT/.gemini" ]; then
mkdir -p "$REPO_ROOT/.gemini"
fi
add_to_gitignore ".gemini/skills"
if [ -L "$target" ]; then
rm "$target"
@@ -139,8 +164,9 @@ setup_gemini() {
ln -s "$SKILLS_SOURCE" "$target"
echo -e "${GREEN} ✓ .gemini/skills -> skills/${NC}"
# Copy AGENTS.md to GEMINI.md
copy_agents_md "GEMINI.md"
# Link AGENTS.md to GEMINI.md
link_agents_md "GEMINI.md"
add_to_gitignore "GEMINI.md"
}
setup_codex() {
@@ -149,6 +175,7 @@ setup_codex() {
if [ ! -d "$REPO_ROOT/.codex" ]; then
mkdir -p "$REPO_ROOT/.codex"
fi
add_to_gitignore ".codex/skills"
if [ -L "$target" ]; then
rm "$target"
@@ -164,12 +191,19 @@ setup_codex() {
setup_copilot() {
if [ -f "$REPO_ROOT/AGENTS.md" ]; then
mkdir -p "$REPO_ROOT/.github"
cp "$REPO_ROOT/AGENTS.md" "$REPO_ROOT/.github/copilot-instructions.md"
# Link AGENTS.md -> .github/copilot-instructions.md
local target="$REPO_ROOT/.github/copilot-instructions.md"
ln -sf "../AGENTS.md" "$target"
echo -e "${GREEN} ✓ AGENTS.md -> .github/copilot-instructions.md${NC}"
# Add specifically the file, NOT the .github folder
add_to_gitignore ".github/copilot-instructions.md"
fi
}
copy_agents_md() {
link_agents_md() {
local target_name="$1"
local agents_files
local count=0
@@ -179,11 +213,15 @@ copy_agents_md() {
for agents_file in $agents_files; do
local agents_dir
agents_dir=$(dirname "$agents_file")
cp "$agents_file" "$agents_dir/$target_name"
# Create relative symlink
# Since files are in same dir, we can just link to basename
(cd "$agents_dir" && ln -sf "$(basename "$agents_file")" "$target_name")
count=$((count + 1))
done
echo -e "${GREEN}Copied $count AGENTS.md -> $target_name${NC}"
echo -e "${GREEN}Linked $count AGENTS.md -> $target_name${NC}"
}
# =============================================================================
@@ -302,4 +340,4 @@ echo "Configured:"
[ "$SETUP_COPILOT" = true ] && echo " • GitHub Copilot: .github/copilot-instructions.md"
echo ""
echo -e "${BLUE}Note: Restart your AI assistant to load the skills.${NC}"
echo -e "${BLUE} AGENTS.md is the source of truth - edit it, then re-run this script.${NC}"
echo -e "${BLUE} AGENTS.md is the source of truth - changes are reflected automatically via symlinks.${NC}"
+17 -17
View File
@@ -201,40 +201,40 @@ test_symlink_not_created_without_flag() {
}
# =============================================================================
# TESTS: AGENTS.md COPYING
# TESTS: AGENTS.md LINKING
# =============================================================================
test_copy_claude_agents_md() {
test_link_claude_agents_md() {
run_setup --claude > /dev/null
assert_file_exists "$TEST_DIR/CLAUDE.md" "Root CLAUDE.md should exist" && \
assert_file_exists "$TEST_DIR/api/CLAUDE.md" "api/CLAUDE.md should exist" && \
assert_file_exists "$TEST_DIR/ui/CLAUDE.md" "ui/CLAUDE.md should exist"
assert_symlink_exists "$TEST_DIR/CLAUDE.md" "Root CLAUDE.md should be a symlink" && \
assert_symlink_exists "$TEST_DIR/api/CLAUDE.md" "api/CLAUDE.md should be a symlink" && \
assert_symlink_exists "$TEST_DIR/ui/CLAUDE.md" "ui/CLAUDE.md should be a symlink"
}
test_copy_gemini_agents_md() {
test_link_gemini_agents_md() {
run_setup --gemini > /dev/null
assert_file_exists "$TEST_DIR/GEMINI.md" "Root GEMINI.md should exist" && \
assert_file_exists "$TEST_DIR/api/GEMINI.md" "api/GEMINI.md should exist" && \
assert_file_exists "$TEST_DIR/ui/GEMINI.md" "ui/GEMINI.md should exist"
assert_symlink_exists "$TEST_DIR/GEMINI.md" "Root GEMINI.md should be a symlink" && \
assert_symlink_exists "$TEST_DIR/api/GEMINI.md" "api/GEMINI.md should be a symlink" && \
assert_symlink_exists "$TEST_DIR/ui/GEMINI.md" "ui/GEMINI.md should be a symlink"
}
test_copy_copilot_to_github() {
test_link_copilot_to_github() {
run_setup --copilot > /dev/null
assert_file_exists "$TEST_DIR/.github/copilot-instructions.md" "Copilot instructions should exist"
assert_symlink_exists "$TEST_DIR/.github/copilot-instructions.md" "Copilot instructions should be a symlink"
}
test_copy_codex_no_extra_files() {
test_link_codex_no_extra_files() {
run_setup --codex > /dev/null
assert_file_not_exists "$TEST_DIR/CODEX.md" "CODEX.md should not be created"
}
test_copy_not_created_without_flag() {
test_link_not_created_without_flag() {
run_setup --codex > /dev/null
assert_file_not_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should not exist" && \
assert_file_not_exists "$TEST_DIR/GEMINI.md" "GEMINI.md should not exist"
assert_symlink_not_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should not exist" && \
assert_symlink_not_exists "$TEST_DIR/GEMINI.md" "GEMINI.md should not exist"
}
test_copy_content_matches_source() {
test_link_content_matches_source() {
run_setup --claude > /dev/null
local source_content target_content
source_content=$(cat "$TEST_DIR/AGENTS.md")
@@ -272,7 +272,7 @@ test_idempotent_multiple_runs() {
run_setup --claude > /dev/null
run_setup --claude > /dev/null
assert_symlink_exists "$TEST_DIR/.claude/skills" "Symlink should still exist after second run" && \
assert_file_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should still exist after second run"
assert_symlink_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should still be a symlink after second run"
}
# =============================================================================
File diff suppressed because it is too large Load Diff
@@ -12,6 +12,8 @@ from prowler.providers.github.exceptions.exceptions import (
GithubInvalidCredentialsError,
GithubInvalidProviderIdError,
GithubInvalidTokenError,
GithubRepoListFileNotFoundError,
GithubRepoListFileReadError,
GithubSetUpIdentityError,
GithubSetUpSessionError,
)
@@ -708,3 +710,81 @@ class Test_GithubProvider_Scoping:
assert provider_none.repositories == []
assert provider_none.organizations == []
class TestGitHubProviderLoadReposFromFile:
"""Tests for GithubProvider._load_repos_from_file"""
def _make_provider(self):
"""Create a GithubProvider instance with mocked session/identity."""
with (
patch(
"prowler.providers.github.github_provider.GithubProvider.setup_session",
return_value=GithubSession(token=PAT_TOKEN, id="", key=""),
),
patch(
"prowler.providers.github.github_provider.GithubProvider.setup_identity",
return_value=GithubIdentityInfo(
account_id=ACCOUNT_ID,
account_name=ACCOUNT_NAME,
account_url=ACCOUNT_URL,
),
),
):
provider = GithubProvider(
personal_access_token=PAT_TOKEN,
)
return provider
def test_load_repos_from_file_happy_path(self, tmp_path):
provider = self._make_provider()
repo_file = tmp_path / "repos.txt"
repo_file.write_text("owner/repo-a\nowner/repo-b\nowner/repo-c\n")
provider._load_repos_from_file(str(repo_file))
assert "owner/repo-a" in provider.repositories
assert "owner/repo-b" in provider.repositories
assert "owner/repo-c" in provider.repositories
def test_load_repos_from_file_comments_and_blanks(self, tmp_path):
provider = self._make_provider()
repo_file = tmp_path / "repos.txt"
repo_file.write_text(
"# This is a comment\n"
"\n"
"owner/repo-a\n"
" # Another comment\n"
" \n"
"owner/repo-b\n"
)
provider._load_repos_from_file(str(repo_file))
assert provider.repositories == ["owner/repo-a", "owner/repo-b"]
def test_load_repos_from_file_not_found(self):
provider = self._make_provider()
with pytest.raises(GithubRepoListFileNotFoundError):
provider._load_repos_from_file("/nonexistent/path/repos.txt")
def test_load_repos_from_file_exceeds_max_lines(self, tmp_path):
provider = self._make_provider()
repo_file = tmp_path / "repos.txt"
# Write MAX_REPO_LIST_LINES + 1 lines to trigger the guard
lines = [f"owner/repo-{i}" for i in range(provider.MAX_REPO_LIST_LINES + 1)]
repo_file.write_text("\n".join(lines) + "\n")
with pytest.raises(GithubRepoListFileReadError):
provider._load_repos_from_file(str(repo_file))
def test_load_repos_from_file_skips_long_names(self, tmp_path):
provider = self._make_provider()
repo_file = tmp_path / "repos.txt"
long_name = "a" * (provider.MAX_REPO_NAME_LENGTH + 1)
repo_file.write_text(f"owner/valid-repo\n{long_name}\nowner/also-valid\n")
provider._load_repos_from_file(str(repo_file))
assert provider.repositories == ["owner/valid-repo", "owner/also-valid"]
@@ -82,13 +82,14 @@ class Test_GitHubArguments:
arguments.init_parser(mock_github_args)
# Verify scoping arguments were added
assert self.mock_scoping_group.add_argument.call_count == 2
assert self.mock_scoping_group.add_argument.call_count == 3
# Check that all scoping arguments are present
calls = self.mock_scoping_group.add_argument.call_args_list
scoping_args = [call[0][0] for call in calls]
assert "--repository" in scoping_args
assert "--repo-list-file" in scoping_args
assert "--organization" in scoping_args
def test_repository_argument_configuration(self):
@@ -277,6 +278,33 @@ class Test_GitHubArguments_Integration:
assert args.repository == ["owner1/repo1"]
assert args.organization == ["org1"]
def test_real_argument_parsing_with_repo_list_file(self):
"""Test parsing arguments with repo-list-file scoping"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
common_parser = argparse.ArgumentParser(add_help=False)
mock_github_args = MagicMock()
mock_github_args.subparsers = subparsers
mock_github_args.common_providers_parser = common_parser
arguments.init_parser(mock_github_args)
# Parse arguments with repo-list-file
args = parser.parse_args(
[
"github",
"--personal-access-token",
"test-token",
"--repo-list-file",
"/path/to/repos.txt",
]
)
assert args.personal_access_token == "test-token"
assert args.repo_list_file == "/path/to/repos.txt"
assert args.repository is None
def test_real_argument_parsing_empty_scoping(self):
"""Test parsing arguments with empty scoping values"""
parser = argparse.ArgumentParser()
+1 -1
View File
@@ -13,5 +13,5 @@ README.md
!.next/static
!.next/standalone
.git
.husky
.pre-commit-config.yaml
scripts/setup-git-hooks.js
+37
View File
@@ -0,0 +1,37 @@
orphan: true
repos:
- repo: local
hooks:
- id: ui-typecheck
name: UI - TypeScript Check
entry: pnpm run typecheck
language: system
files: '\.(ts|tsx|js|jsx)$'
pass_filenames: false
priority: 0
- id: ui-lint
name: UI - ESLint
entry: pnpm run lint:check
language: system
files: '\.(ts|tsx|js|jsx)$'
pass_filenames: false
priority: 0
- id: ui-tests
name: UI - Unit Tests
entry: pnpm exec vitest related --run
language: system
files: '\.(ts|tsx|js|jsx)$'
exclude: '\.test\.|\.spec\.|vitest\.config|vitest\.setup'
pass_filenames: true
priority: 1
- id: ui-build
name: UI - Build
entry: pnpm run build
language: system
files: '\.(ts|tsx|js|jsx|json|css)$'
pass_filenames: false
priority: 2
+9
View File
@@ -2,6 +2,15 @@
All notable changes to the **Prowler UI** are documented in this file.
## [1.25.0] (Prowler UNRELEASED)
### 🔄 Changed
- Redesign compliance page with a horizontal ThreatScore card (always-visible pillar breakdown + ActionDropdown), client-side search for compliance frameworks, compact scan selector trigger, responsive mobile filters, download-started toasts for CSV/PDF exports, enhanced compliance cards with truncated titles, and Alert-based empty/error states; migrate Progress component from HeroUI to shadcn [(#10767)](https://github.com/prowler-cloud/prowler/pull/10767)
- Backward-compatibility middleware redirect from `/sign-up?invitation_token=…` to `/invitation/accept?invitation_token=…`; new invitation emails use `/invitation/accept` directly [(#10797)](https://github.com/prowler-cloud/prowler/pull/10797)
---
## [1.24.2] (Prowler v5.24.2)
### 🐞 Fixed
+2 -2
View File
@@ -85,10 +85,10 @@ git clone git@github.com:prowler-cloud/ui.git
pnpm install
```
**Note:** The `pnpm install` command will automatically configure Git hooks for code quality checks. If you experience issues, you can manually configure them:
**Note:** The `pnpm install` command will automatically configure prek Git hooks for code quality checks. If hooks are not installed, run from the repo root:
```bash
git config core.hooksPath "ui/.husky"
prek install
```
#### Run the development server
+4 -7
View File
@@ -6,12 +6,10 @@ import { handleApiResponse } from "@/lib/server-actions-helper";
export const getCompliancesOverview = async ({
scanId,
region,
query,
filters = {},
}: {
scanId?: string;
region?: string | string[];
query?: string;
filters?: Record<string, string | string[] | undefined>;
} = {}) => {
const headers = await getAuthHeaders({ contentType: false });
@@ -31,8 +29,6 @@ export const getCompliancesOverview = async ({
setParam("filter[scan_id]", scanId);
setParam("filter[region__in]", region);
if (query) url.searchParams.set("filter[search]", query);
try {
const response = await fetch(url.toString(), {
headers,
@@ -46,15 +42,16 @@ export const getCompliancesOverview = async ({
};
export const getComplianceOverviewMetadataInfo = async ({
query = "",
sort = "",
filters = {},
}) => {
}: {
sort?: string;
filters?: Record<string, string | string[] | undefined>;
} = {}) => {
const headers = await getAuthHeaders({ contentType: false });
const url = new URL(`${apiBaseUrl}/compliance-overviews/metadata`);
if (query) url.searchParams.append("filter[search]", query);
if (sort) url.searchParams.append("sort", sort);
Object.entries(filters).forEach(([key, value]) => {
@@ -8,10 +8,6 @@ import { useEffect, useRef, useState } from "react";
import { acceptInvitation } from "@/actions/invitations";
import { Button } from "@/components/shadcn";
import {
INVITATION_ACTION_PARAM,
INVITATION_SIGNUP_ACTION,
} from "@/lib/invitation-routing";
type AcceptState =
| { kind: "no-token" }
@@ -204,7 +200,7 @@ export function AcceptInvitationClient({
className="w-full"
onClick={() => {
router.push(
`/sign-up?invitation_token=${encodeURIComponent(token!)}&${INVITATION_ACTION_PARAM}=${INVITATION_SIGNUP_ACTION}`,
`/sign-up?invitation_token=${encodeURIComponent(token!)}`,
);
}}
>
@@ -78,7 +78,7 @@ export default async function ComplianceDetail({
await Promise.all([
getComplianceOverviewMetadataInfo({
filters: {
"filter[scan_id]": selectedScanId,
"filter[scan_id]": selectedScanId ?? undefined,
},
}),
getComplianceAttributes(complianceId),
+16
View File
@@ -0,0 +1,16 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("Compliance overview page", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "page.tsx");
const source = readFileSync(filePath, "utf8");
it("delegates client-side search to ComplianceOverviewGrid", () => {
expect(source).toContain("ComplianceOverviewGrid");
expect(source).not.toContain("filter[search]");
});
});
+84 -85
View File
@@ -1,3 +1,4 @@
import { Info } from "lucide-react";
import { Suspense } from "react";
import {
@@ -7,12 +8,14 @@ import {
import { getThreatScore } from "@/actions/overview";
import { getScans } from "@/actions/scans";
import {
ComplianceCard,
ComplianceSkeletonGrid,
NoScansAvailable,
ThreatScoreBadge,
} from "@/components/compliance";
import { ComplianceHeader } from "@/components/compliance/compliance-header/compliance-header";
import { ComplianceFilters } from "@/components/compliance/compliance-header/compliance-filters";
import { ComplianceOverviewGrid } from "@/components/compliance/compliance-overview-grid";
import { Alert, AlertDescription } from "@/components/shadcn/alert";
import { Card, CardContent } from "@/components/shadcn/card/card";
import { ContentLayout } from "@/components/ui";
import {
ExpandedScanData,
@@ -30,12 +33,6 @@ export default async function Compliance({
const resolvedSearchParams = await searchParams;
const searchParamsKey = JSON.stringify(resolvedSearchParams || {});
const filters = Object.fromEntries(
Object.entries(resolvedSearchParams).filter(([key]) =>
key.startsWith("filter["),
),
);
const scansData = await getScans({
filters: {
"filter[state]": "completed",
@@ -79,9 +76,12 @@ export default async function Compliance({
.filter(Boolean) as ExpandedScanData[];
// Use scanId from URL, or select the first scan if not provided
const selectedScanId =
resolvedSearchParams.scanId || expandedScansData[0]?.id || null;
const query = (filters["filter[search]"] as string) || "";
const scanIdParam = resolvedSearchParams.scanId;
const scanIdFromUrl = Array.isArray(scanIdParam)
? scanIdParam[0]
: scanIdParam;
const selectedScanId: string | null =
scanIdFromUrl || expandedScansData[0]?.id || null;
// Find the selected scan
const selectedScan = expandedScansData.find(
@@ -102,7 +102,6 @@ export default async function Compliance({
// Fetch metadata if we have a selected scan
const metadataInfoData = selectedScanId
? await getComplianceOverviewMetadataInfo({
query,
filters: {
"filter[scan_id]": selectedScanId,
},
@@ -131,28 +130,39 @@ export default async function Compliance({
<ContentLayout title="Compliance" icon="lucide:shield-check">
{selectedScanId ? (
<>
<div className="mb-6 flex flex-col gap-6 lg:flex-row lg:items-start lg:justify-between">
<div className="min-w-0 flex-1">
<ComplianceHeader
scans={expandedScansData}
uniqueRegions={uniqueRegions}
/>
</div>
{threatScoreData &&
typeof selectedScanId === "string" &&
selectedScan && (
<div className="w-full lg:w-[360px] lg:flex-shrink-0">
<ThreatScoreBadge
score={threatScoreData.score}
scanId={selectedScanId}
provider={selectedScan.providerInfo.provider}
selectedScan={selectedScanData}
sectionScores={threatScoreData.sectionScores}
/>
</div>
)}
{/* Row 1: Filters */}
<div className="mb-6">
<ComplianceFilters
scans={expandedScansData}
uniqueRegions={uniqueRegions}
selectedScanId={selectedScanId}
/>
</div>
<Suspense key={searchParamsKey} fallback={<ComplianceSkeletonGrid />}>
{/* Row 2: ThreatScore card — full width, horizontal */}
{threatScoreData &&
typeof selectedScanId === "string" &&
selectedScan && (
<div className="mb-6">
<ThreatScoreBadge
score={threatScoreData.score}
scanId={selectedScanId}
provider={selectedScan.providerInfo.provider}
selectedScan={selectedScanData}
sectionScores={threatScoreData.sectionScores}
/>
</div>
)}
{/* Row 3: Compliance grid with client-side search */}
<Suspense
key={searchParamsKey}
fallback={
<ComplianceOverviewPanel>
<ComplianceSkeletonGrid />
</ComplianceOverviewPanel>
}
>
<SSRComplianceGrid
searchParams={resolvedSearchParams}
selectedScan={selectedScanData}
@@ -176,25 +186,23 @@ const SSRComplianceGrid = async ({
const scanId = searchParams.scanId?.toString() || "";
const regionFilter = searchParams["filter[region__in]"]?.toString() || "";
// Extract all filter parameters
const filters = Object.fromEntries(
Object.entries(searchParams).filter(([key]) => key.startsWith("filter[")),
);
// Extract query from filters
const query = (filters["filter[search]"] as string) || "";
// Only fetch compliance data if we have a valid scanId
const compliancesData =
scanId && scanId.trim() !== ""
? await getCompliancesOverview({
scanId,
region: regionFilter,
query,
})
: { data: [], errors: [] };
const type = compliancesData?.data?.type;
const frameworks = compliancesData?.data
?.filter((compliance: ComplianceOverviewData) => {
return compliance.attributes.framework !== "ProwlerThreatScore";
})
.sort((a: ComplianceOverviewData, b: ComplianceOverviewData) =>
a.attributes.framework.localeCompare(b.attributes.framework),
);
// Check if the response contains no data
if (
@@ -204,58 +212,49 @@ const SSRComplianceGrid = async ({
type === "tasks"
) {
return (
<div className="flex h-full items-center">
<div className="text-default-500 text-sm">
No compliance data available for the selected scan.
</div>
</div>
<Alert variant="info">
<Info className="size-4" />
<AlertDescription>
This scan has no compliance data available yet, please select a
different one.
</AlertDescription>
</Alert>
);
}
// Handle errors returned by the API
if (compliancesData?.errors?.length > 0) {
return (
<div className="flex h-full items-center">
<div className="text-default-500 text-sm">Provide a valid scan ID.</div>
</div>
<Alert variant="info">
<Info className="size-4" />
<AlertDescription>Provide a valid scan ID.</AlertDescription>
</Alert>
);
}
return (
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 2xl:grid-cols-4">
{compliancesData.data
.filter((compliance: ComplianceOverviewData) => {
// Filter out ProwlerThreatScore from the grid
return compliance.attributes.framework !== "ProwlerThreatScore";
})
.sort((a: ComplianceOverviewData, b: ComplianceOverviewData) =>
a.attributes.framework.localeCompare(b.attributes.framework),
)
.map((compliance: ComplianceOverviewData) => {
const { attributes, id } = compliance;
const {
framework,
version,
requirements_passed,
total_requirements,
} = attributes;
return (
<ComplianceCard
key={id}
title={framework}
version={version}
passingRequirements={requirements_passed}
totalRequirements={total_requirements}
prevPassingRequirements={requirements_passed}
prevTotalRequirements={total_requirements}
scanId={scanId}
complianceId={id}
id={id}
selectedScan={selectedScan}
/>
);
})}
</div>
<ComplianceOverviewPanel>
<ComplianceOverviewGrid
frameworks={frameworks}
scanId={scanId}
selectedScan={selectedScan}
/>
</ComplianceOverviewPanel>
);
};
const ComplianceOverviewPanel = ({
children,
}: {
children: React.ReactNode;
}) => {
return (
<Card
variant="base"
padding="none"
className="minimal-scrollbar shadow-small relative z-0 w-full gap-4 overflow-auto"
>
<CardContent className="flex flex-col gap-4 p-4">{children}</CardContent>
</Card>
);
};
@@ -0,0 +1,30 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("ComplianceCard", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "compliance-card.tsx");
const source = readFileSync(filePath, "utf8");
it("keeps the shadcn Card base variant", () => {
expect(source).toContain('variant="base"');
});
it("uses a responsive stacked layout for narrow screens", () => {
expect(source).toContain("flex-col");
expect(source).toContain("sm:flex-row");
});
it("uses the shadcn progress component instead of Hero UI", () => {
expect(source).toContain('from "@/components/shadcn/progress"');
expect(source).not.toContain("@heroui/progress");
});
it("places compact actions in the icon column on larger screens", () => {
expect(source).toContain('orientation="column"');
expect(source).toContain('buttonWidth="icon"');
});
});
+77 -54
View File
@@ -1,11 +1,20 @@
"use client";
import { Progress } from "@heroui/progress";
import Image from "next/image";
import { useRouter, useSearchParams } from "next/navigation";
import { Card, CardContent } from "@/components/shadcn/card/card";
import { Progress } from "@/components/shadcn/progress";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn/tooltip";
import { getReportTypeForFramework } from "@/lib/compliance/compliance-report-types";
import {
getScoreIndicatorClass,
type ScoreColorVariant,
} from "@/lib/compliance/score-utils";
import { ScanEntity } from "@/types/scans";
import { getComplianceIcon } from "../icons";
@@ -45,13 +54,9 @@ export const ComplianceCard: React.FC<ComplianceCardProps> = ({
(passingRequirements / totalRequirements) * 100,
);
const getRatingColor = (ratingPercentage: number) => {
if (ratingPercentage <= 10) {
return "danger";
}
if (ratingPercentage <= 40) {
return "warning";
}
const getRatingVariant = (value: number): ScoreColorVariant => {
if (value <= 10) return "danger";
if (value <= 40) return "warning";
return "success";
};
@@ -80,58 +85,76 @@ export const ComplianceCard: React.FC<ComplianceCardProps> = ({
onClick={navigateToDetail}
>
<CardContent className="p-0">
<div className="flex w-full items-center gap-4">
{getComplianceIcon(title) && (
<Image
src={getComplianceIcon(title)}
alt={`${title} logo`}
className="h-10 w-10 min-w-10 rounded-md border border-gray-300 bg-white object-contain p-1"
/>
)}
<div className="flex w-full flex-col">
<h4 className="text-small mb-1 leading-5 font-bold">
{formatTitle(title)}
{version ? ` - ${version}` : ""}
</h4>
<Progress
label="Score:"
size="sm"
aria-label="Compliance score"
value={ratingPercentage}
showValueLabel={true}
classNames={{
track: "drop-shadow-sm border border-default",
label: "tracking-wider font-medium text-default-600 text-xs",
value: "text-foreground/60 -mb-2",
<div className="flex w-full flex-col gap-3 sm:flex-row sm:items-start">
<div className="flex shrink-0 items-center justify-between sm:flex-col sm:items-start sm:gap-2">
{getComplianceIcon(title) && (
<Image
src={getComplianceIcon(title)}
alt={`${title} logo`}
className="h-10 w-10 min-w-10 self-start rounded-md border border-gray-300 bg-white object-contain p-1"
/>
)}
<div
className="shrink-0"
onClick={(e) => e.stopPropagation()}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.stopPropagation();
}
}}
color={getRatingColor(ratingPercentage)}
/>
<div className="mt-2 flex items-center justify-between">
<small>
role="group"
tabIndex={0}
>
<ComplianceDownloadContainer
compact
orientation="column"
buttonWidth="icon"
presentation="dropdown"
scanId={scanId}
complianceId={complianceId}
reportType={getReportTypeForFramework(title)}
disabled={hasRegionFilter}
/>
</div>
</div>
<div className="flex w-full min-w-0 flex-col gap-3">
<Tooltip>
<TooltipTrigger asChild>
<h4 className="text-small truncate leading-5 font-bold">
{formatTitle(title)}
{version ? ` - ${version}` : ""}
</h4>
</TooltipTrigger>
<TooltipContent>
{formatTitle(title)}
{version ? ` - ${version}` : ""}
</TooltipContent>
</Tooltip>
<div className="flex flex-col gap-2">
<div className="flex items-center justify-between gap-3 text-xs">
<span className="text-text-neutral-secondary font-medium tracking-wider">
Score:
</span>
<span className="text-text-neutral-secondary">
{ratingPercentage}%
</span>
</div>
<Progress
aria-label="Compliance score"
value={ratingPercentage}
className="border-border-neutral-secondary h-2.5 border drop-shadow-sm"
indicatorClassName={getScoreIndicatorClass(
getRatingVariant(ratingPercentage),
)}
/>
</div>
<div className="flex flex-col gap-3 sm:flex-row sm:items-center sm:justify-between">
<small className="min-w-0">
<span className="mr-1 text-xs font-semibold">
{passingRequirements} / {totalRequirements}
</span>
Passing Requirements
</small>
<div
onClick={(e) => e.stopPropagation()}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.stopPropagation();
}
}}
role="group"
tabIndex={0}
>
<ComplianceDownloadContainer
compact
scanId={scanId}
complianceId={complianceId}
reportType={getReportTypeForFramework(title)}
disabled={hasRegionFilter}
/>
</div>
</div>
</div>
</div>
@@ -0,0 +1,133 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { beforeEach, describe, expect, it, vi } from "vitest";
const { downloadComplianceCsvMock, downloadComplianceReportPdfMock } =
vi.hoisted(() => ({
downloadComplianceCsvMock: vi.fn(),
downloadComplianceReportPdfMock: vi.fn(),
}));
vi.mock("@/lib/helper", () => ({
downloadComplianceCsv: downloadComplianceCsvMock,
downloadComplianceReportPdf: downloadComplianceReportPdfMock,
}));
vi.mock("@/components/ui", () => ({
toast: {},
}));
import { ComplianceDownloadContainer } from "./compliance-download-container";
describe("ComplianceDownloadContainer", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "compliance-download-container.tsx");
const source = readFileSync(filePath, "utf8");
beforeEach(() => {
vi.clearAllMocks();
});
it("uses the shared action dropdown for the card actions mode", () => {
expect(source).toContain("ActionDropdown");
expect(source).not.toContain("@heroui/button");
});
it("should expose an accessible actions menu trigger", () => {
render(
<ComplianceDownloadContainer
compact
presentation="dropdown"
scanId="scan-1"
complianceId="compliance-1"
reportType="threatscore"
/>,
);
expect(
screen.getByRole("button", { name: "Open compliance export actions" }),
).toBeInTheDocument();
});
it("should support fixed icon-sized dropdown trigger in column mode", () => {
render(
<ComplianceDownloadContainer
compact
presentation="dropdown"
orientation="column"
buttonWidth="icon"
scanId="scan-1"
complianceId="compliance-1"
reportType="threatscore"
/>,
);
const trigger = screen.getByRole("button", {
name: "Open compliance export actions",
});
expect(trigger.className).toContain("border-text-neutral-secondary");
});
it("should open export actions from the compact trigger", async () => {
const user = userEvent.setup();
render(
<ComplianceDownloadContainer
compact
presentation="dropdown"
scanId="scan-1"
complianceId="compliance-1"
reportType="threatscore"
/>,
);
await user.click(
screen.getByRole("button", { name: "Open compliance export actions" }),
);
expect(screen.getByText("Download CSV report")).toBeInTheDocument();
expect(screen.getByText("Download PDF report")).toBeInTheDocument();
});
it("should trigger both downloads from the actions menu", async () => {
const user = userEvent.setup();
render(
<ComplianceDownloadContainer
compact
presentation="dropdown"
scanId="scan-1"
complianceId="compliance-1"
reportType="threatscore"
/>,
);
await user.click(
screen.getByRole("button", { name: "Open compliance export actions" }),
);
await user.click(
screen.getByRole("menuitem", { name: /Download CSV report/i }),
);
await user.click(
screen.getByRole("button", { name: "Open compliance export actions" }),
);
await user.click(
screen.getByRole("menuitem", { name: /Download PDF report/i }),
);
expect(downloadComplianceCsvMock).toHaveBeenCalledWith(
"scan-1",
"compliance-1",
{},
);
expect(downloadComplianceReportPdfMock).toHaveBeenCalledWith(
"scan-1",
"threatscore",
{},
);
});
});
@@ -4,6 +4,15 @@ import { DownloadIcon, FileTextIcon } from "lucide-react";
import { useState } from "react";
import { Button } from "@/components/shadcn/button/button";
import {
ActionDropdown,
ActionDropdownItem,
} from "@/components/shadcn/dropdown";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn/tooltip";
import { toast } from "@/components/ui";
import type { ComplianceReportType } from "@/lib/compliance/compliance-report-types";
import {
@@ -18,6 +27,9 @@ interface ComplianceDownloadContainerProps {
reportType?: ComplianceReportType;
compact?: boolean;
disabled?: boolean;
orientation?: "row" | "column";
buttonWidth?: "auto" | "icon";
presentation?: "buttons" | "dropdown";
}
export const ComplianceDownloadContainer = ({
@@ -26,9 +38,14 @@ export const ComplianceDownloadContainer = ({
reportType,
compact = false,
disabled = false,
orientation = "row",
buttonWidth = "auto",
presentation = "buttons",
}: ComplianceDownloadContainerProps) => {
const [isDownloadingCsv, setIsDownloadingCsv] = useState(false);
const [isDownloadingPdf, setIsDownloadingPdf] = useState(false);
const isIconWidth = buttonWidth === "icon";
const isDropdown = presentation === "dropdown";
const handleDownloadCsv = async () => {
if (isDownloadingCsv) return;
@@ -52,40 +69,116 @@ export const ComplianceDownloadContainer = ({
const buttonClassName = cn(
"border-button-primary text-button-primary hover:bg-button-primary/10",
compact && "h-7 px-2 text-xs",
compact &&
!isIconWidth &&
"h-7 px-2 text-xs sm:w-full sm:justify-center sm:px-2.5",
orientation === "column" && !isIconWidth && "w-full",
isIconWidth && "size-10 rounded-lg p-0",
);
const labelClassName = isIconWidth
? "sr-only"
: compact
? "sr-only sm:not-sr-only"
: undefined;
const showTooltip = compact || isIconWidth;
return (
<div className={cn("flex gap-2", compact ? "items-center" : "flex-col")}>
<Button
size="sm"
variant="outline"
className={buttonClassName}
onClick={handleDownloadCsv}
disabled={disabled || isDownloadingCsv}
aria-label="Download compliance CSV report"
>
<FileTextIcon
size={14}
className={isDownloadingCsv ? "animate-download-icon" : ""}
/>
CSV
</Button>
{reportType && (
<Button
size="sm"
variant="outline"
className={buttonClassName}
onClick={handleDownloadPdf}
disabled={disabled || isDownloadingPdf}
aria-label="Download compliance PDF report"
<div
className={cn(
"flex",
orientation === "column"
? "flex-col items-start"
: compact
? "w-full justify-end sm:w-auto"
: "flex-row",
)}
>
{isDropdown ? (
<ActionDropdown
variant={isIconWidth ? "bordered" : "table"}
ariaLabel="Open compliance export actions"
>
<DownloadIcon
size={14}
className={isDownloadingPdf ? "animate-download-icon" : ""}
<ActionDropdownItem
icon={
<FileTextIcon
className={isDownloadingCsv ? "animate-download-icon" : ""}
/>
}
label="Download CSV report"
onSelect={handleDownloadCsv}
disabled={disabled || isDownloadingCsv}
/>
PDF
</Button>
{reportType && (
<ActionDropdownItem
icon={
<DownloadIcon
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
}
label="Download PDF report"
onSelect={handleDownloadPdf}
disabled={disabled || isDownloadingPdf}
/>
)}
</ActionDropdown>
) : (
<div
className={cn(
"flex gap-2",
orientation === "column"
? isIconWidth
? "flex-col items-start"
: "flex-col items-stretch"
: compact
? "w-full flex-wrap items-center justify-end sm:w-auto sm:flex-nowrap"
: "flex-row flex-wrap items-center",
)}
>
<Tooltip>
<TooltipTrigger asChild>
<Button
size="sm"
variant="outline"
className={buttonClassName}
onClick={handleDownloadCsv}
disabled={disabled || isDownloadingCsv}
aria-label="Download compliance CSV report"
>
<FileTextIcon
size={14}
className={isDownloadingCsv ? "animate-download-icon" : ""}
/>
<span className={labelClassName}>CSV</span>
</Button>
</TooltipTrigger>
{showTooltip && (
<TooltipContent>Download CSV report</TooltipContent>
)}
</Tooltip>
{reportType && (
<Tooltip>
<TooltipTrigger asChild>
<Button
size="sm"
variant="outline"
className={buttonClassName}
onClick={handleDownloadPdf}
disabled={disabled || isDownloadingPdf}
aria-label="Download compliance PDF report"
>
<DownloadIcon
size={14}
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
<span className={labelClassName}>PDF</span>
</Button>
</TooltipTrigger>
{showTooltip && (
<TooltipContent>Download PDF report</TooltipContent>
)}
</Tooltip>
)}
</div>
)}
</div>
);
@@ -0,0 +1,76 @@
"use client";
import { useRouter, useSearchParams } from "next/navigation";
import { ClearFiltersButton } from "@/components/filters/clear-filters-button";
import {
MultiSelect,
MultiSelectContent,
MultiSelectItem,
MultiSelectSelectAll,
MultiSelectSeparator,
MultiSelectTrigger,
MultiSelectValue,
} from "@/components/shadcn/select/multiselect";
import { useUrlFilters } from "@/hooks/use-url-filters";
import { ScanSelector, SelectScanComplianceDataProps } from "./scan-selector";
interface ComplianceFiltersProps {
scans: SelectScanComplianceDataProps["scans"];
uniqueRegions: string[];
selectedScanId: string;
}
export const ComplianceFilters = ({
scans,
uniqueRegions,
selectedScanId,
}: ComplianceFiltersProps) => {
const router = useRouter();
const searchParams = useSearchParams();
const { updateFilter } = useUrlFilters();
const handleScanChange = (selectedKey: string) => {
const params = new URLSearchParams(searchParams);
params.set("scanId", selectedKey);
router.push(`?${params.toString()}`, { scroll: false });
};
const regionValues =
searchParams.get("filter[region__in]")?.split(",").filter(Boolean) ?? [];
return (
<div className="flex max-w-4xl flex-wrap items-center gap-4">
<div className="w-full sm:max-w-[380px] sm:min-w-[200px] sm:flex-1">
<ScanSelector
scans={scans}
selectedScanId={selectedScanId}
onSelectionChange={handleScanChange}
/>
</div>
{uniqueRegions.length > 0 && (
<div className="w-full sm:max-w-[280px] sm:min-w-[200px] sm:flex-1">
<MultiSelect
values={regionValues}
onValuesChange={(values) => updateFilter("region__in", values)}
>
<MultiSelectTrigger size="default">
<MultiSelectValue placeholder="All Regions" />
</MultiSelectTrigger>
<MultiSelectContent search={false} width="wide">
<MultiSelectSelectAll>Select All</MultiSelectSelectAll>
<MultiSelectSeparator />
{uniqueRegions.map((region) => (
<MultiSelectItem key={region} value={region}>
{region}
</MultiSelectItem>
))}
</MultiSelectContent>
</MultiSelect>
</div>
)}
<ClearFiltersButton showCount />
</div>
);
};
@@ -0,0 +1,18 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("ComplianceHeader", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "compliance-header.tsx");
const source = readFileSync(filePath, "utf8");
it("renders the scan selector inside the shared filters grid using default layout", () => {
expect(source).toContain("prependElement");
expect(source).toContain("<DataCompliance");
expect(source).toContain("DataTableFilterCustom");
expect(source).not.toContain("gridClassName");
});
});
@@ -35,6 +35,9 @@ export const ComplianceHeader = ({
selectedScan,
}: ComplianceHeaderProps) => {
const frameworkFilters = [];
const prependElement = showProviders ? (
<DataCompliance scans={scans} className="w-full sm:col-span-2" />
) : undefined;
// Add CIS Profile Level filter if framework is CIS
if (framework === "CIS") {
@@ -42,6 +45,7 @@ export const ComplianceHeader = ({
key: "cis_profile_level",
labelCheckboxGroup: "Level",
values: ["Level 1", "Level 2"],
width: "wide" as const,
index: 0, // Show first
showSelectAll: false, // No "Select All" option since Level 2 includes Level 1
defaultValues: ["Level 2"], // Default to Level 2 selected (which includes Level 1)
@@ -55,6 +59,7 @@ export const ComplianceHeader = ({
key: "region__in",
labelCheckboxGroup: "Regions",
values: uniqueRegions,
width: "wide" as const,
index: 1, // Show after framework filters
},
]
@@ -77,9 +82,11 @@ export const ComplianceHeader = ({
{selectedScan && <ComplianceScanInfo scan={selectedScan} />}
{/* Showed in the compliance page */}
{showProviders && <DataCompliance scans={scans} />}
{!hideFilters && allFilters.length > 0 && (
<DataTableFilterCustom filters={allFilters} />
{!hideFilters && (allFilters.length > 0 || showProviders) && (
<DataTableFilterCustom
filters={allFilters}
prependElement={prependElement}
/>
)}
</div>
{logoPath && complianceTitle && (
@@ -7,11 +7,13 @@ import {
ScanSelector,
SelectScanComplianceDataProps,
} from "@/components/compliance/compliance-header/index";
import { cn } from "@/lib/utils";
interface DataComplianceProps {
scans: SelectScanComplianceDataProps["scans"];
className?: string;
}
export const DataCompliance = ({ scans }: DataComplianceProps) => {
export const DataCompliance = ({ scans, className }: DataComplianceProps) => {
const router = useRouter();
const searchParams = useSearchParams();
@@ -36,7 +38,7 @@ export const DataCompliance = ({ scans }: DataComplianceProps) => {
};
return (
<div className="flex max-w-fit">
<div className={cn("w-full", className)}>
<ScanSelector
scans={scans}
selectedScanId={selectedScanId}
@@ -1,2 +1,3 @@
export * from "./compliance-filters";
export * from "./data-compliance";
export * from "./scan-selector";
@@ -1,5 +1,6 @@
"use client";
import { Badge } from "@/components/shadcn/badge/badge";
import {
Select,
SelectContent,
@@ -7,6 +8,7 @@ import {
SelectTrigger,
SelectValue,
} from "@/components/shadcn/select/select";
import { getScanEntityLabel } from "@/lib/helper-filters";
import { ProviderType, ScanProps } from "@/types";
import { ComplianceScanInfo } from "./compliance-scan-info";
@@ -29,6 +31,7 @@ export const ScanSelector = ({
onSelectionChange,
}: SelectScanComplianceDataProps) => {
const selectedScan = scans.find((item) => item.id === selectedScanId);
const triggerLabel = selectedScan ? getScanEntityLabel(selectedScan) : "";
return (
<Select
@@ -39,21 +42,28 @@ export const ScanSelector = ({
}
}}
>
<SelectTrigger className="w-full max-w-[360px]">
<SelectTrigger className="w-full">
<SelectValue placeholder="Select a scan">
{selectedScan ? (
<ComplianceScanInfo scan={selectedScan} />
<>
<span className="text-text-neutral-secondary shrink-0 text-xs">
Scan:
</span>
<Badge variant="tag" className="truncate">
{triggerLabel}
</Badge>
</>
) : (
"Select a scan"
)}
</SelectValue>
</SelectTrigger>
<SelectContent className="max-w-[360px]">
<SelectContent>
{scans.map((scan) => (
<SelectItem
key={scan.id}
value={scan.id}
className="data-[state=checked]:bg-bg-neutral-tertiary"
className="data-[state=checked]:bg-bg-neutral-tertiary [&_svg:not([class*='size-'])]:size-6"
>
<ComplianceScanInfo scan={scan} />
</SelectItem>
@@ -0,0 +1,70 @@
"use client";
import { useState } from "react";
import { ComplianceCard } from "@/components/compliance/compliance-card";
import { DataTableSearch } from "@/components/ui/table/data-table-search";
import type { ComplianceOverviewData } from "@/types/compliance";
import type { ScanEntity } from "@/types/scans";
interface ComplianceOverviewGridProps {
frameworks: ComplianceOverviewData[];
scanId: string;
selectedScan?: ScanEntity;
}
export const ComplianceOverviewGrid = ({
frameworks,
scanId,
selectedScan,
}: ComplianceOverviewGridProps) => {
const [searchTerm, setSearchTerm] = useState("");
const filteredFrameworks = frameworks.filter((compliance) =>
compliance.attributes.framework
.toLowerCase()
.includes(searchTerm.toLowerCase()),
);
return (
<>
<div className="flex items-center justify-between gap-4">
<DataTableSearch
controlledValue={searchTerm}
onSearchChange={setSearchTerm}
placeholder="Search frameworks..."
/>
<span className="text-text-neutral-secondary shrink-0 text-sm">
{filteredFrameworks.length.toLocaleString()} Total Entries
</span>
</div>
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 2xl:grid-cols-4">
{filteredFrameworks.map((compliance) => {
const { attributes, id } = compliance;
const {
framework,
version,
requirements_passed,
total_requirements,
} = attributes;
return (
<ComplianceCard
key={id}
title={framework}
version={version}
passingRequirements={requirements_passed}
totalRequirements={total_requirements}
prevPassingRequirements={requirements_passed}
prevTotalRequirements={total_requirements}
scanId={scanId}
complianceId={id}
id={id}
selectedScan={selectedScan}
/>
);
})}
</div>
</>
);
};
+2
View File
@@ -13,10 +13,12 @@ export * from "./compliance-custom-details/cis-details";
export * from "./compliance-custom-details/ens-details";
export * from "./compliance-custom-details/iso-details";
export * from "./compliance-download-container";
export * from "./compliance-header/compliance-filters";
export * from "./compliance-header/compliance-header";
export * from "./compliance-header/compliance-scan-info";
export * from "./compliance-header/data-compliance";
export * from "./compliance-header/scan-selector";
export * from "./compliance-overview-grid";
export * from "./no-scans-available";
export * from "./skeletons/bar-chart-skeleton";
export * from "./skeletons/compliance-accordion-skeleton";
@@ -0,0 +1,17 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("ComplianceSkeletonGrid", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "compliance-grid-skeleton.tsx");
const source = readFileSync(filePath, "utf8");
it("uses shadcn skeletons instead of Hero UI", () => {
expect(source).toContain('from "@/components/shadcn/skeleton/skeleton"');
expect(source).not.toContain("@heroui/card");
expect(source).not.toContain("@heroui/skeleton");
});
});
@@ -1,19 +1,11 @@
import { Card } from "@heroui/card";
import { Skeleton } from "@heroui/skeleton";
import React from "react";
import { Skeleton } from "@/components/shadcn/skeleton/skeleton";
export const ComplianceSkeletonGrid = () => {
return (
<Card className="h-fit w-full p-4">
<div className="3xl:grid-cols-4 grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-3">
{[...Array(28)].map((_, index) => (
<div key={index} className="flex flex-col gap-4">
<Skeleton className="h-28 rounded-lg">
<div className="bg-default-300 h-full"></div>
</Skeleton>
</div>
))}
</div>
</Card>
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 2xl:grid-cols-4">
{[...Array(28)].map((_, index) => (
<Skeleton key={index} className="h-28 rounded-xl" />
))}
</div>
);
};
@@ -0,0 +1,32 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("ThreatScoreBadge", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "threatscore-badge.tsx");
const source = readFileSync(filePath, "utf8");
it("uses shadcn card and progress components instead of Hero UI", () => {
expect(source).toContain('from "@/components/shadcn/card/card"');
expect(source).toContain('from "@/components/shadcn/progress"');
expect(source).not.toContain("@heroui/card");
expect(source).not.toContain("@heroui/progress");
});
it("uses ActionDropdown for downloads instead of ComplianceDownloadContainer", () => {
expect(source).toContain("ActionDropdown");
expect(source).toContain("ActionDropdownItem");
expect(source).toContain("downloadComplianceCsv");
expect(source).toContain("downloadComplianceReportPdf");
expect(source).not.toContain("ComplianceDownloadContainer");
});
it("does not use Collapsible components", () => {
expect(source).not.toContain("Collapsible");
expect(source).not.toContain("CollapsibleTrigger");
expect(source).not.toContain("CollapsibleContent");
});
});
+80 -109
View File
@@ -1,27 +1,24 @@
"use client";
import { Card, CardBody } from "@heroui/card";
import { Progress } from "@heroui/progress";
import {
ChevronDown,
ChevronUp,
DownloadIcon,
FileTextIcon,
} from "lucide-react";
import { DownloadIcon, FileTextIcon } from "lucide-react";
import { useRouter, useSearchParams } from "next/navigation";
import { useState } from "react";
import type { SectionScores } from "@/actions/overview/threat-score";
import { ThreatScoreLogo } from "@/components/compliance/threatscore-logo";
import { Button } from "@/components/shadcn/button/button";
import { Card, CardContent } from "@/components/shadcn/card/card";
import {
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "@/components/shadcn/collapsible";
ActionDropdown,
ActionDropdownItem,
} from "@/components/shadcn/dropdown";
import { Progress } from "@/components/shadcn/progress";
import { toast } from "@/components/ui";
import { COMPLIANCE_REPORT_TYPES } from "@/lib/compliance/compliance-report-types";
import { getScoreColor, getScoreTextClass } from "@/lib/compliance/score-utils";
import {
getScoreColor,
getScoreIndicatorClass,
getScoreTextClass,
} from "@/lib/compliance/score-utils";
import {
downloadComplianceCsv,
downloadComplianceReportPdf,
@@ -44,9 +41,8 @@ export const ThreatScoreBadge = ({
}: ThreatScoreBadgeProps) => {
const router = useRouter();
const searchParams = useSearchParams();
const [isDownloadingPdf, setIsDownloadingPdf] = useState(false);
const [isDownloadingCsv, setIsDownloadingCsv] = useState(false);
const [isExpanded, setIsExpanded] = useState(false);
const [isDownloadingPdf, setIsDownloadingPdf] = useState(false);
const complianceId = `prowler_threatscore_${provider.toLowerCase()}`;
@@ -69,7 +65,18 @@ export const ThreatScoreBadge = ({
router.push(`${path}?${params.toString()}`);
};
const handleDownloadCsv = async () => {
if (isDownloadingCsv) return;
setIsDownloadingCsv(true);
try {
await downloadComplianceCsv(scanId, complianceId, toast);
} finally {
setIsDownloadingCsv(false);
}
};
const handleDownloadPdf = async () => {
if (isDownloadingPdf) return;
setIsDownloadingPdf(true);
try {
await downloadComplianceReportPdf(
@@ -82,23 +89,12 @@ export const ThreatScoreBadge = ({
}
};
const handleDownloadCsv = async () => {
setIsDownloadingCsv(true);
try {
await downloadComplianceCsv(scanId, complianceId, toast);
} finally {
setIsDownloadingCsv(false);
}
};
return (
<Card
shadow="sm"
className="border-default-200 h-full border bg-transparent"
>
<CardBody className="flex flex-row flex-wrap items-center justify-between gap-3 p-4 lg:flex-col lg:items-stretch lg:justify-start">
<Card variant="base" padding="md" className="relative gap-4">
<CardContent className="flex flex-col gap-4 p-0 pr-14 lg:flex-row lg:items-start lg:gap-6">
{/* Clickable ThreatScore button */}
<button
className="border-default-200 hover:border-default-300 hover:bg-default-50/50 flex w-full cursor-pointer flex-row items-center justify-between gap-4 rounded-lg border bg-transparent p-3 transition-all"
className="border-border-neutral-secondary bg-bg-neutral-tertiary hover:border-border-neutral-primary hover:bg-bg-neutral-secondary flex shrink-0 cursor-pointer flex-row items-center justify-between gap-4 rounded-xl border p-3 pr-12 text-left transition-colors lg:pr-3"
onClick={handleCardClick}
type="button"
>
@@ -111,92 +107,67 @@ export const ThreatScoreBadge = ({
<Progress
aria-label="ThreatScore progress"
value={score}
color={getScoreColor(score)}
size="sm"
className="w-24"
className="border-border-neutral-secondary h-2.5 w-24 border"
indicatorClassName={getScoreIndicatorClass(getScoreColor(score))}
/>
</div>
</button>
{/* Pillar breakdown — always visible */}
{sectionScores && Object.keys(sectionScores).length > 0 && (
<Collapsible
open={isExpanded}
onOpenChange={setIsExpanded}
className="w-full"
>
<CollapsibleTrigger
aria-label={
isExpanded ? "Hide pillar breakdown" : "Show pillar breakdown"
}
className="text-default-500 hover:text-default-700 flex w-auto items-center justify-center gap-1 py-1 text-xs transition-colors lg:w-full"
>
{isExpanded ? (
<>
<ChevronUp size={14} />
Hide pillar breakdown
</>
) : (
<>
<ChevronDown size={14} />
Show pillar breakdown
</>
)}
</CollapsibleTrigger>
<CollapsibleContent className="border-default-200 mt-2 w-full space-y-2 border-t pt-2">
{Object.entries(sectionScores)
.sort(([, a], [, b]) => a - b)
.map(([section, sectionScore]) => (
<div
key={section}
className="flex items-center gap-2 text-xs"
<div className="border-border-neutral-secondary flex-1 space-y-2 border-t pt-3 lg:border-t-0 lg:border-l lg:pt-0 lg:pl-6">
{Object.entries(sectionScores)
.sort(([, a], [, b]) => a - b)
.map(([section, sectionScore]) => (
<div key={section} className="flex items-center gap-2 text-xs">
<span className="text-text-neutral-secondary w-1/3 min-w-0 shrink-0 truncate lg:w-1/4">
{section}
</span>
<Progress
aria-label={`${section} score`}
value={sectionScore}
className="border-border-neutral-secondary h-2 min-w-16 flex-1 border"
indicatorClassName={getScoreIndicatorClass(
getScoreColor(sectionScore),
)}
/>
<span
className={`w-12 shrink-0 text-right font-medium ${getScoreTextClass(sectionScore)}`}
>
<span className="text-default-600 w-1/3 min-w-0 shrink-0 truncate">
{section}
</span>
<Progress
aria-label={`${section} score`}
value={sectionScore}
color={getScoreColor(sectionScore)}
size="sm"
className="min-w-16 flex-1"
/>
<span
className={`w-12 shrink-0 text-right font-medium ${getScoreTextClass(sectionScore)}`}
>
{sectionScore.toFixed(1)}%
</span>
</div>
))}
</CollapsibleContent>
</Collapsible>
{sectionScore.toFixed(1)}%
</span>
</div>
))}
</div>
)}
</CardContent>
<div className="flex gap-2">
<Button
size="sm"
variant="outline"
className="flex-1"
onClick={handleDownloadPdf}
disabled={isDownloadingPdf || isDownloadingCsv}
>
<DownloadIcon
size={14}
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
PDF
</Button>
<Button
size="sm"
variant="outline"
className="flex-1"
onClick={handleDownloadCsv}
disabled={isDownloadingCsv || isDownloadingPdf}
>
<FileTextIcon size={14} />
CSV
</Button>
</div>
</CardBody>
{/* ActionDropdown for downloads — top-right */}
<div className="absolute top-3 right-4">
<ActionDropdown
variant="bordered"
ariaLabel="Open compliance export actions"
>
<ActionDropdownItem
icon={
<FileTextIcon
className={isDownloadingCsv ? "animate-download-icon" : ""}
/>
}
label="Download CSV report"
onSelect={handleDownloadCsv}
/>
<ActionDropdownItem
icon={
<DownloadIcon
className={isDownloadingPdf ? "animate-download-icon" : ""}
/>
}
label="Download PDF report"
onSelect={handleDownloadPdf}
/>
</ActionDropdown>
</div>
</Card>
);
};
+1
View File
@@ -11,6 +11,7 @@ export * from "./drawer";
export * from "./dropdown/dropdown";
export * from "./info-field";
export * from "./input/input";
export * from "./progress";
export * from "./search-input/search-input";
export * from "./select/multiselect";
export * from "./select/select";
+42
View File
@@ -0,0 +1,42 @@
"use client";
import * as ProgressPrimitive from "@radix-ui/react-progress";
import { ComponentProps } from "react";
import { cn } from "@/lib/utils";
interface ProgressProps extends ComponentProps<typeof ProgressPrimitive.Root> {
indicatorClassName?: string;
}
/**
 * Thin wrapper around the Radix Progress primitive.
 *
 * Accepts every `ProgressPrimitive.Root` prop plus `indicatorClassName`,
 * which styles the moving bar independently from the track.
 */
function Progress({
  className,
  value = 0,
  indicatorClassName,
  ...props
}: ProgressProps) {
  // Radix allows `value` to be null (indeterminate); coerce to 0 so the
  // transform computation below always receives a number.
  const percent = value ?? 0;
  // The indicator spans the full track and is shifted left by whatever
  // percentage is still missing, revealing `percent`% of the bar.
  const offset = 100 - percent;

  return (
    <ProgressPrimitive.Root
      data-slot="progress"
      value={percent}
      className={cn(
        "bg-bg-neutral-tertiary relative h-2 w-full overflow-hidden rounded-full",
        className,
      )}
      {...props}
    >
      <ProgressPrimitive.Indicator
        data-slot="progress-indicator"
        className={cn(
          "bg-button-primary h-full w-full flex-1 transition-all",
          indicatorClassName,
        )}
        style={{ transform: `translateX(-${offset}%)` }}
      />
    </ProgressPrimitive.Root>
  );
}
export { Progress };
-16
View File
@@ -1015,14 +1015,6 @@
"strategy": "installed",
"generatedAt": "2026-01-19T13:54:24.770Z"
},
{
"section": "devDependencies",
"name": "husky",
"from": "9.1.7",
"to": "9.1.7",
"strategy": "installed",
"generatedAt": "2025-10-22T12:36:37.962Z"
},
{
"section": "devDependencies",
"name": "jsdom",
@@ -1031,14 +1023,6 @@
"strategy": "installed",
"generatedAt": "2026-01-29T16:42:27.795Z"
},
{
"section": "devDependencies",
"name": "lint-staged",
"from": "15.5.2",
"to": "15.5.2",
"strategy": "installed",
"generatedAt": "2025-10-22T12:36:37.962Z"
},
{
"section": "devDependencies",
"name": "postcss",
@@ -1,115 +0,0 @@
# Code Review - Quick Start
## 3 Steps to Enable
### 1. Open `.env`
```bash
nano ui/.env
# or your favorite editor
```
### 2. Find this line
```bash
CODE_REVIEW_ENABLED=false
```
### 3. Change it to
```bash
CODE_REVIEW_ENABLED=true
```
**Done! ✅**
---
## What Happens Now
Every time you `git commit`:
```
✅ If your code complies with AGENTS.md standards:
→ Commit executes normally
❌ If there are standard violations:
→ Commit is BLOCKED
→ You see the errors in the terminal
→ Fix the code
→ Commit again
```
---
## Example
```bash
$ git commit -m "feat: add new component"
🏁 Prowler UI - Pre-Commit Hook
⚙️ Code Review Status: true
🔍 Running Claude Code standards validation...
📋 Files to validate:
- components/my-feature.tsx
📤 Sending to Claude Code for validation...
STATUS: FAILED
- File: components/my-feature.tsx:45
Rule: React Imports
Issue: Using 'import * as React'
Expected: import { useState } from "react"
❌ VALIDATION FAILED
Fix violations before committing
# Fix the file and commit again
$ git commit -m "feat: add new component"
🏁 Prowler UI - Pre-Commit Hook
⚙️ Code Review Status: true
🔍 Running Claude Code standards validation...
✅ VALIDATION PASSED
# Commit successful ✅
```
---
## Disable Temporarily
If you need to commit without validation:
```bash
# Option 1: Change in .env
CODE_REVIEW_ENABLED=false
# Option 2: Bypass (use with caution!)
git commit --no-verify
```
---
## What Gets Validated
- ✅ Correct React imports
- ✅ TypeScript patterns (const-based types)
- ✅ Tailwind CSS (no var() or hex in className)
- ✅ cn() utility (only for conditionals)
- ✅ No useMemo/useCallback without reason
- ✅ Zod v4 syntax
- ✅ File organization
- ✅ "use client"/"use server" directives in the right files
---
## More Info
Read `CODE_REVIEW_SETUP.md` for:
- Troubleshooting
- Complete details
- Advanced configuration
+6
View File
@@ -53,3 +53,9 @@ export function getScoreLabel(score: number): string {
if (score >= SCORE_THRESHOLDS.WARNING) return "Moderate Risk";
return "Critical Risk";
}
/**
 * Maps a score color variant to the Tailwind background class used by the
 * Progress indicator (danger → fail, warning → warning, anything else → pass).
 */
export function getScoreIndicatorClass(variant: ScoreColorVariant): string {
  switch (variant) {
    case "danger":
      return "bg-bg-fail";
    case "warning":
      return "bg-bg-warning";
    default:
      return "bg-bg-pass";
  }
}
+9 -1
View File
@@ -216,6 +216,10 @@ export const downloadComplianceCsv = async (
complianceId: string,
toast: ReturnType<typeof useToast>["toast"],
): Promise<void> => {
toast({
title: "Download Started",
description: "Preparing the CSV report. This may take a moment.",
});
const result = await getComplianceCsv(scanId, complianceId);
await downloadFile(
result,
@@ -236,8 +240,12 @@ export const downloadComplianceReportPdf = async (
reportType: ComplianceReportType,
toast: ReturnType<typeof useToast>["toast"],
): Promise<void> => {
const result = await getCompliancePdfReport(scanId, reportType);
const reportName = COMPLIANCE_REPORT_DISPLAY_NAMES[reportType];
toast({
title: "Download Started",
description: `Preparing the ${reportName} PDF report. This may take a moment.`,
});
const result = await getCompliancePdfReport(scanId, reportType);
await downloadFile(
result,
"application/pdf",
-10
View File
@@ -1,10 +0,0 @@
/**
 * Query-string parameter name and value used to bypass the backward-compat
 * redirect in proxy.ts when the user explicitly chose "Create an account"
 * from the invitation smart router.
 *
 * The client navigates to: /sign-up?invitation_token=<token>&action=signup
 * and the proxy skips the redirect whenever the "action" param is present.
 */
export const INVITATION_ACTION_PARAM = "action";
export const INVITATION_SIGNUP_ACTION = "signup";
+1
View File
@@ -52,6 +52,7 @@
"@radix-ui/react-icons": "1.3.2",
"@radix-ui/react-label": "2.1.7",
"@radix-ui/react-popover": "1.1.15",
"@radix-ui/react-progress": "1.1.7",
"@radix-ui/react-radio-group": "1.3.8",
"@radix-ui/react-scroll-area": "1.2.10",
"@radix-ui/react-select": "2.2.5",
+3
View File
@@ -110,6 +110,9 @@ importers:
'@radix-ui/react-popover':
specifier: 1.1.15
version: 1.1.15(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)
'@radix-ui/react-progress':
specifier: 1.1.7
version: 1.1.7(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)
'@radix-ui/react-radio-group':
specifier: 1.3.8
version: 1.3.8(@types/react-dom@19.2.3(@types/react@19.2.8))(@types/react@19.2.8)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)
+3 -18
View File
@@ -1,7 +1,7 @@
import { NextRequest, NextResponse } from "next/server";
import { NextResponse } from "next/server";
import type { NextAuthRequest } from "next-auth";
import { auth } from "@/auth.config";
import { INVITATION_ACTION_PARAM } from "@/lib/invitation-routing";
const publicRoutes = [
"/sign-in",
@@ -18,24 +18,9 @@ const isPublicRoute = (pathname: string): boolean => {
};
// NextAuth's auth() wrapper - renamed from middleware to proxy
export default auth((req: NextRequest & { auth: any }) => {
export default auth((req: NextAuthRequest) => {
const { pathname } = req.nextUrl;
// Backward compatibility: redirect old invitation links to new smart router
// Skip redirect when the user explicitly chose "Create an account" from the smart router
if (
pathname === "/sign-up" &&
req.nextUrl.searchParams.has("invitation_token") &&
!req.nextUrl.searchParams.has(INVITATION_ACTION_PARAM)
) {
const acceptUrl = new URL("/invitation/accept", req.url);
acceptUrl.searchParams.set(
"invitation_token",
req.nextUrl.searchParams.get("invitation_token")!,
);
return NextResponse.redirect(acceptUrl);
}
const user = req.auth?.user;
const sessionError = req.auth?.error;
+24
View File
@@ -76,4 +76,28 @@ test.describe("Middleware Error Handling", () => {
// Note: Billing and integrations permission tests removed
// These features only exist in Prowler Cloud, not in the open-source version
test(
  "should not redirect /sign-up?invitation_token=... to /invitation/accept",
  { tag: ["@e2e", "@auth", "@middleware", "@AUTH-MW-E2E-003"] },
  async ({ page, context }) => {
    const signUpPage = new SignUpPage(page);

    // Start from a clean session so stale auth state cannot affect routing.
    await context.clearCookies();

    const invitationToken = "test-token-regression";
    const response = await page.goto(
      `/sign-up?invitation_token=${invitationToken}`,
      { waitUntil: "commit" },
    );

    // Regression guard: the middleware must no longer rewrite this URL.
    // Assert both that the final URL stayed on /sign-up with the token
    // intact and that the sign-up form actually rendered — this catches
    // the "URL stayed but page broke" failure mode as well.
    expect(response?.status()).toBe(200);
    await expect(page).toHaveURL(
      `/sign-up?invitation_token=${invitationToken}`,
    );
    await signUpPage.verifyPageLoaded();
  },
);
});
-1
View File
@@ -54,7 +54,6 @@ export class SignUpPage extends BasePage {
}
// Assert the browser landed on the sign-up route and that the form's key
// controls rendered — guards against a blank or error page at the right URL.
async verifyPageLoaded(): Promise<void> {
await expect(this.page).toHaveURL("/sign-up");
await expect(this.emailInput).toBeVisible();
await expect(this.submitButton).toBeVisible();
}