Compare commits

..

3 Commits

Author SHA1 Message Date
Alan Buscaglia
acee366e82 docs(api): add Silk profiling and improve test data guidance 2025-12-16 15:19:06 +01:00
Alan Buscaglia
47d66c9c4c docs(api): update guide with project-specific patterns
- Add rls_transaction usage examples
- Add DJANGO_LOGGING_LEVEL=DEBUG option
- Add RLS context for raw SQL (set_config)
- Add section on partitioned tables index creation
- Improve index design guidance for RLS
2025-12-09 12:47:48 +01:00
Alan Buscaglia
8d41941d22 docs(api): add query performance guide and PR checklist item
- Add query-performance-guide.md with EXPLAIN ANALYZE best practices
- Add checklist item in PR template for query performance validation
- Document common issues, fixes, and minimum test data requirements
2025-12-09 12:43:00 +01:00
43 changed files with 952 additions and 1279 deletions

View File

@@ -32,6 +32,7 @@ Please add a detailed description of how to review this PR.
#### API
- [ ] Verify if API specs need to be regenerated.
- [ ] Check if version updates are required (e.g., specs, Poetry, etc.).
- [ ] Query performance validated with `EXPLAIN ANALYZE` for new/modified endpoints. See [Query Performance Guide](https://github.com/prowler-cloud/prowler/blob/master/api/docs/query-performance-guide.md).
- [ ] Ensure new entries are added to [CHANGELOG.md](https://github.com/prowler-cloud/prowler/blob/master/api/CHANGELOG.md), if applicable.
### License

View File

@@ -1,4 +1,4 @@
name: UI - E2E Cloud Tests
name: UI - E2E Tests
on:
pull_request:
@@ -6,185 +6,125 @@ on:
- master
- "v5.*"
paths:
- ".github/workflows/ui-e2e-tests.yml"
- "ui/**"
push:
branches:
- master
- "v5.*"
paths:
- ".github/workflows/ui-e2e-cloud-tests.yml"
- "ui/**"
workflow_run:
workflows:
- "API - Build, Push and Deploy"
- "UI - Build, Push and Deploy"
types: [completed]
branches: [master, v5.*]
workflow_dispatch:
inputs:
environment:
description: "Environment to test"
required: true
default: "dev"
type: choice
options:
- dev
- stg
- pro
permissions:
id-token: write
contents: read
actions: read
- '.github/workflows/ui-e2e-tests.yml'
- 'ui/**'
jobs:
e2e-tests:
if: github.repository == 'prowler-cloud/prowler-cloud'
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
env:
NEXTAUTH_URL: "http://localhost:3000"
AUTH_SECRET: "fallback-ci-secret-for-testing"
AUTH_TRUST_HOST: "true"
AUTH_SECRET: 'fallback-ci-secret-for-testing'
AUTH_TRUST_HOST: true
NEXTAUTH_URL: 'http://localhost:3000'
NEXT_PUBLIC_API_BASE_URL: 'http://localhost:8080/api/v1'
E2E_ADMIN_USER: ${{ secrets.E2E_ADMIN_USER }}
E2E_ADMIN_PASSWORD: ${{ secrets.E2E_ADMIN_PASSWORD }}
E2E_AWS_PROVIDER_ACCOUNT_ID: ${{ secrets.E2E_AWS_PROVIDER_ACCOUNT_ID }}
E2E_AWS_PROVIDER_ACCESS_KEY: ${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}
E2E_AWS_PROVIDER_SECRET_KEY: ${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}
E2E_AWS_PROVIDER_ROLE_ARN: ${{ secrets.E2E_AWS_PROVIDER_ROLE_ARN }}
E2E_AZURE_SUBSCRIPTION_ID: ${{ secrets.E2E_AZURE_SUBSCRIPTION_ID }}
E2E_AZURE_CLIENT_ID: ${{ secrets.E2E_AZURE_CLIENT_ID }}
E2E_AZURE_SECRET_ID: ${{ secrets.E2E_AZURE_SECRET_ID }}
E2E_AZURE_TENANT_ID: ${{ secrets.E2E_AZURE_TENANT_ID }}
E2E_M365_DOMAIN_ID: ${{ secrets.E2E_M365_DOMAIN_ID }}
E2E_M365_CLIENT_ID: ${{ secrets.E2E_M365_CLIENT_ID }}
E2E_M365_SECRET_ID: ${{ secrets.E2E_M365_SECRET_ID }}
E2E_M365_TENANT_ID: ${{ secrets.E2E_M365_TENANT_ID }}
E2E_M365_CERTIFICATE_CONTENT: ${{ secrets.E2E_M365_CERTIFICATE_CONTENT }}
E2E_KUBERNETES_CONTEXT: 'kind-kind'
E2E_KUBERNETES_KUBECONFIG_PATH: /home/runner/.kube/config
E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY: ${{ secrets.E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY }}
E2E_GCP_PROJECT_ID: ${{ secrets.E2E_GCP_PROJECT_ID }}
E2E_GITHUB_APP_ID: ${{ secrets.E2E_GITHUB_APP_ID }}
E2E_GITHUB_BASE64_APP_PRIVATE_KEY: ${{ secrets.E2E_GITHUB_BASE64_APP_PRIVATE_KEY }}
E2E_GITHUB_USERNAME: ${{ secrets.E2E_GITHUB_USERNAME }}
E2E_GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_PERSONAL_ACCESS_TOKEN }}
E2E_GITHUB_ORGANIZATION: ${{ secrets.E2E_GITHUB_ORGANIZATION }}
E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN }}
E2E_ORGANIZATION_ID: ${{ secrets.E2E_ORGANIZATION_ID }}
E2E_OCI_TENANCY_ID: ${{ secrets.E2E_OCI_TENANCY_ID }}
E2E_OCI_USER_ID: ${{ secrets.E2E_OCI_USER_ID }}
E2E_OCI_FINGERPRINT: ${{ secrets.E2E_OCI_FINGERPRINT }}
E2E_OCI_KEY_CONTENT: ${{ secrets.E2E_OCI_KEY_CONTENT }}
E2E_OCI_REGION: ${{ secrets.E2E_OCI_REGION }}
E2E_NEW_USER_PASSWORD: ${{ secrets.E2E_NEW_USER_PASSWORD }}
steps:
- name: Determine environment
id: env
run: |
if [[ "${{ github.event_name }}" == "pull_request" || "${{ github.event_name }}" == "push" ]]; then
echo "environment=dev" >> $GITHUB_OUTPUT
elif [[ "${{ github.event_name }}" == "workflow_run" && "${{ github.event.workflow_run.conclusion }}" == "success" && "${{ github.event.workflow_run.event }}" == "release" ]]; then
echo "environment=stg" >> $GITHUB_OUTPUT
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "environment=${{ github.event.inputs.environment }}" >> $GITHUB_OUTPUT
else
echo "Unknown trigger, skipping..."
exit 1
fi
- name: Set environment variables
id: vars
run: |
case "${{ steps.env.outputs.environment }}" in
"dev")
echo "api_url=https://api.dev.prowler.com/api/v1" >> $GITHUB_OUTPUT
echo "e2e_user_secret=DEV_E2E_USER" >> $GITHUB_OUTPUT
echo "e2e_password_secret=DEV_E2E_PASSWORD" >> $GITHUB_OUTPUT
echo "environment_name=DEV" >> $GITHUB_OUTPUT
;;
"stg")
echo "api_url=https://api.stg.prowler.com/api/v1" >> $GITHUB_OUTPUT
echo "e2e_user_secret=STG_E2E_USER" >> $GITHUB_OUTPUT
echo "e2e_password_secret=STG_E2E_PASSWORD" >> $GITHUB_OUTPUT
echo "environment_name=STG" >> $GITHUB_OUTPUT
;;
"pro")
echo "api_url=https://api.prowler.com/api/v1" >> $GITHUB_OUTPUT
echo "e2e_user_secret=PRO_E2E_USER" >> $GITHUB_OUTPUT
echo "e2e_password_secret=PRO_E2E_PASSWORD" >> $GITHUB_OUTPUT
echo "environment_name=PRO" >> $GITHUB_OUTPUT
;;
esac
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Environment info
env:
ENV_NAME: ${{ steps.vars.outputs.environment_name }}
API_URL: ${{ steps.vars.outputs.api_url }}
run: |
echo "Environment: $ENV_NAME"
echo "API URL: $API_URL"
echo "Workflow: ${{ github.workflow }}"
echo "Event: ${{ github.event_name }}"
echo "Started at: $(date)"
- name: Verify both STG deployments completed
if: steps.env.outputs.environment == 'stg'
env:
GH_TOKEN: ${{ github.token }}
run: |
echo "Verifying that both API and UI deployments completed successfully..."
# Get the latest runs for both workflows triggered by the same release
API_RUN=$(gh run list --workflow="API - Build, Push and Deploy" --event=release --limit=1 --json status,conclusion,createdAt --jq '.[0]')
API_STATUS=$(echo "$API_RUN" | jq -r '.status')
API_CONCLUSION=$(echo "$API_RUN" | jq -r '.conclusion')
UI_RUN=$(gh run list --workflow="UI - Build, Push and Deploy" --event=release --limit=1 --json status,conclusion,createdAt --jq '.[0]')
UI_STATUS=$(echo "$UI_RUN" | jq -r '.status')
UI_CONCLUSION=$(echo "$UI_RUN" | jq -r '.conclusion')
echo "API workflow - Status: $API_STATUS, Conclusion: $API_CONCLUSION"
echo "UI workflow - Status: $UI_STATUS, Conclusion: $UI_CONCLUSION"
# Verify both workflows completed successfully
if [[ "$API_STATUS" != "completed" || "$API_CONCLUSION" != "success" ]]; then
echo "API deployment not ready (Status: $API_STATUS, Conclusion: $API_CONCLUSION)"
exit 1
fi
if [[ "$UI_STATUS" != "completed" || "$UI_CONCLUSION" != "success" ]]; then
echo "UI deployment not ready (Status: $UI_STATUS, Conclusion: $UI_CONCLUSION)"
exit 1
fi
echo "Both API and UI deployments completed successfully for STG"
- name: Verify both PRO deployments completed
if: steps.env.outputs.environment == 'pro'
env:
GH_TOKEN: ${{ github.token }}
run: |
echo "Verifying that both API and UI deployments completed successfully..."
# Get the latest manual runs for both workflows
API_RUN=$(gh run list --workflow="API - Build, Push and Deploy" --event=workflow_dispatch --limit=1 --json status,conclusion,createdAt --jq '.[0]')
API_STATUS=$(echo "$API_RUN" | jq -r '.status')
API_CONCLUSION=$(echo "$API_RUN" | jq -r '.conclusion')
UI_RUN=$(gh run list --workflow="UI - Build, Push and Deploy" --event=workflow_dispatch --limit=1 --json status,conclusion,createdAt --jq '.[0]')
UI_STATUS=$(echo "$UI_RUN" | jq -r '.status')
UI_CONCLUSION=$(echo "$UI_RUN" | jq -r '.conclusion')
echo "API workflow - Status: $API_STATUS, Conclusion: $API_CONCLUSION"
echo "UI workflow - Status: $UI_STATUS, Conclusion: $UI_CONCLUSION"
# Verify both workflows completed successfully
if [[ "$API_STATUS" != "completed" || "$API_CONCLUSION" != "success" ]]; then
echo "API deployment not ready (Status: $API_STATUS, Conclusion: $API_CONCLUSION)"
exit 1
fi
if [[ "$UI_STATUS" != "completed" || "$UI_CONCLUSION" != "success" ]]; then
echo "UI deployment not ready (Status: $UI_STATUS, Conclusion: $UI_CONCLUSION)"
exit 1
fi
echo "Both API and UI deployments completed successfully for PRO"
- name: Setup Tailscale
if: steps.env.outputs.environment != 'pro'
uses: tailscale/github-action@84a3f23bb4d843bcf4da6cf824ec1be473daf4de # v3.2.3
- name: Create k8s Kind Cluster
uses: helm/kind-action@v1
with:
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
tags: tag:github-actions
- name: Verify API is accessible
env:
API_URL: ${{ steps.vars.outputs.api_url }}
ENV_NAME: ${{ steps.vars.outputs.environment_name }}
cluster_name: kind
- name: Modify kubeconfig
run: |
echo "Checking $ENV_NAME API at $API_URL/docs..."
curl -f --connect-timeout 30 --max-time 60 ${API_URL}/docs
echo "$ENV_NAME API is accessible"
# Point the kind cluster's API server in the kubeconfig at https://kind-control-plane:6443
# so it is reachable from the worker service defined in docker-compose.yml
kubectl config set-cluster kind-kind --server=https://kind-control-plane:6443
kubectl config view
- name: Add network kind to docker compose
run: |
# Add the external "kind" network to docker-compose so services can reach the kind cluster
yq -i '.networks.kind.external = true' docker-compose.yml
# Add network kind to worker service and default network too
yq -i '.services.worker.networks = ["kind","default"]' docker-compose.yml
- name: Fix API data directory permissions
run: docker run --rm -v $(pwd)/_data/api:/data alpine chown -R 1000:1000 /data
- name: Add AWS credentials for testing AWS SDK Default Adding Provider
run: |
echo "Adding AWS credentials for testing AWS SDK Default Adding Provider..."
echo "AWS_ACCESS_KEY_ID=${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}" >> .env
echo "AWS_SECRET_ACCESS_KEY=${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}" >> .env
- name: Start API services
run: |
# Override docker-compose image tag to use latest instead of stable
# This overrides any PROWLER_API_VERSION set in .env file
export PROWLER_API_VERSION=latest
echo "Using PROWLER_API_VERSION=${PROWLER_API_VERSION}"
docker compose up -d api worker worker-beat
- name: Wait for API to be ready
run: |
echo "Waiting for prowler-api..."
timeout=150 # 2.5 minutes max (checked every 5s)
elapsed=0
while [ $elapsed -lt $timeout ]; do
if curl -s ${NEXT_PUBLIC_API_BASE_URL}/docs >/dev/null 2>&1; then
echo "Prowler API is ready!"
exit 0
fi
echo "Waiting for prowler-api... (${elapsed}s elapsed)"
sleep 5
elapsed=$((elapsed + 5))
done
echo "Timeout waiting for prowler-api to start"
exit 1
- name: Load database fixtures for E2E tests
run: |
docker compose exec -T api sh -c '
echo "Loading all fixtures from api/fixtures/dev/..."
for fixture in api/fixtures/dev/*.json; do
if [ -f "$fixture" ]; then
echo "Loading $fixture"
poetry run python manage.py loaddata "$fixture" --database admin
fi
done
echo "All database fixtures loaded successfully!"
'
- name: Setup Node.js environment
uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with:
node-version: "20.x"
- name: Install pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
node-version: '20.x'
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
version: 10
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
@@ -197,10 +137,6 @@ jobs:
run: pnpm install --frozen-lockfile
- name: Build UI application
working-directory: ./ui
env:
NEXT_PUBLIC_API_BASE_URL: ${{ steps.vars.outputs.api_url }}
NEXT_PUBLIC_IS_CLOUD_ENV: "true"
CLOUD_API_BASE_URL: ${{ steps.vars.outputs.api_url }}
run: pnpm run build
- name: Cache Playwright browsers
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
@@ -216,50 +152,17 @@ jobs:
run: pnpm run test:e2e:install
- name: Run E2E tests
working-directory: ./ui
env:
NEXT_PUBLIC_API_BASE_URL: ${{ steps.vars.outputs.api_url }}
NEXT_PUBLIC_IS_CLOUD_ENV: "true"
CLOUD_API_BASE_URL: ${{ steps.vars.outputs.api_url }}
E2E_USER: ${{ secrets[steps.vars.outputs.e2e_user_secret] }}
E2E_PASSWORD: ${{ secrets[steps.vars.outputs.e2e_password_secret] }}
E2E_ADMIN_USER: ${{ secrets.E2E_ADMIN_USER }}
E2E_ADMIN_PASSWORD: ${{ secrets.E2E_ADMIN_PASSWORD }}
E2E_AWS_PROVIDER_ACCOUNT_ID: ${{ secrets.E2E_AWS_PROVIDER_ACCOUNT_ID }}
E2E_AWS_PROVIDER_ACCESS_KEY: ${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}
E2E_AWS_PROVIDER_SECRET_KEY: ${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}
E2E_AWS_PROVIDER_ROLE_ARN: ${{ secrets.E2E_AWS_PROVIDER_ROLE_ARN }}
E2E_AZURE_SUBSCRIPTION_ID: ${{ secrets.E2E_AZURE_SUBSCRIPTION_ID }}
E2E_AZURE_CLIENT_ID: ${{ secrets.E2E_AZURE_CLIENT_ID }}
E2E_AZURE_SECRET_ID: ${{ secrets.E2E_AZURE_SECRET_ID }}
E2E_AZURE_TENANT_ID: ${{ secrets.E2E_AZURE_TENANT_ID }}
E2E_M365_DOMAIN_ID: ${{ secrets.E2E_M365_DOMAIN_ID }}
E2E_M365_CLIENT_ID: ${{ secrets.E2E_M365_CLIENT_ID }}
E2E_M365_SECRET_ID: ${{ secrets.E2E_M365_SECRET_ID }}
E2E_M365_TENANT_ID: ${{ secrets.E2E_M365_TENANT_ID }}
E2E_M365_CERTIFICATE_CONTENT: ${{ secrets.E2E_M365_CERTIFICATE_CONTENT }}
E2E_KUBERNETES_CONTEXT: "kind-kind"
E2E_KUBERNETES_KUBECONFIG_PATH: /home/runner/.kube/config
E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY: ${{ secrets.E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY }}
E2E_GCP_PROJECT_ID: ${{ secrets.E2E_GCP_PROJECT_ID }}
E2E_GITHUB_APP_ID: ${{ secrets.E2E_GITHUB_APP_ID }}
E2E_GITHUB_BASE64_APP_PRIVATE_KEY: ${{ secrets.E2E_GITHUB_BASE64_APP_PRIVATE_KEY }}
E2E_GITHUB_USERNAME: ${{ secrets.E2E_GITHUB_USERNAME }}
E2E_GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_PERSONAL_ACCESS_TOKEN }}
E2E_GITHUB_ORGANIZATION: ${{ secrets.E2E_GITHUB_ORGANIZATION }}
E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN }}
E2E_ORGANIZATION_ID: ${{ secrets.E2E_ORGANIZATION_ID }}
E2E_OCI_TENANCY_ID: ${{ secrets.E2E_OCI_TENANCY_ID }}
E2E_OCI_USER_ID: ${{ secrets.E2E_OCI_USER_ID }}
E2E_OCI_FINGERPRINT: ${{ secrets.E2E_OCI_FINGERPRINT }}
E2E_OCI_KEY_CONTENT: ${{ secrets.E2E_OCI_KEY_CONTENT }}
E2E_OCI_REGION: ${{ secrets.E2E_OCI_REGION }}
E2E_NEW_USER_PASSWORD: ${{ secrets.E2E_NEW_USER_PASSWORD }}
run: pnpm run test:e2e-cloud
run: pnpm run test:e2e
- name: Upload test reports
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always()
if: failure()
with:
name: playwright-report-${{ steps.env.outputs.environment }}-${{ github.run_number }}
name: playwright-report
path: ui/playwright-report/
retention-days: 30
- name: Cleanup services
if: always()
run: |
echo "Shutting down services..."
docker compose down -v || true
echo "Cleanup completed"

View File

@@ -0,0 +1,457 @@
# Query Performance Guide
## Overview
This guide explains how to validate query performance when developing new endpoints or modifying existing ones. **This is part of the development process**, not a separate task—just like writing unit tests.
The goal is simple: ensure PostgreSQL uses indexes correctly for the queries your code generates.
## When to Validate
You **must** validate query performance when:
- Creating a new endpoint that queries the database
- Modifying an existing query (adding filters, joins, or sorting)
- Adding new indexes
- Working on performance-critical endpoints (overviews, findings, resources)
## Profiling with Django Silk (Recommended)
[Django Silk](https://github.com/jazzband/django-silk) is the recommended way to profile queries because it captures the actual SQL generated by your code during real HTTP requests. This gives you the most accurate picture of what happens in production.
### Enabling Silk
Silk is installed as a dev dependency but disabled by default. To enable it temporarily for profiling:
#### 1. Add Silk to your local settings
In `api/src/backend/config/django/devel.py`, add at the end of the file:
```python
# Silk profiler (temporary - remove after profiling)
INSTALLED_APPS += ["silk"] # noqa: F405
MIDDLEWARE += ["silk.middleware.SilkyMiddleware"] # noqa: F405
```
#### 2. Add Silk URLs
In `api/src/backend/api/v1/urls.py`, add at the end:
```python
from django.conf import settings
if settings.DEBUG:
urlpatterns += [path("silk/", include("silk.urls", namespace="silk"))]
```
#### 3. Run Silk migrations
```bash
cd api/src/backend
poetry run python manage.py migrate --database admin
```
#### 4. Access Silk
Start the development server and navigate to `http://localhost:8000/api/v1/silk/`
### Using Silk
1. Make requests to the endpoint you want to profile
2. Open Silk UI and find your request
3. Click on the request to see all SQL queries executed
4. For each query, you can see:
- Execution time
- Number of similar queries (N+1 detection)
- The actual SQL with parameters
- **EXPLAIN output** (click on a query to see it)
### Disabling Silk
After profiling, **remove the changes** you made to `devel.py` and `urls.py`. Don't commit Silk configuration to the repository.
## Manual Query Analysis with EXPLAIN ANALYZE
For quick checks or when you need more control, you can run `EXPLAIN ANALYZE` directly.
### 1. Get Your Query
#### Option A: Using Django Shell with RLS
This approach mirrors how queries run in production with Row Level Security enabled:
```bash
cd api/src/backend
poetry run python manage.py shell
```
```python
from django.db import connection
from api.db_utils import rls_transaction
from api.models import Finding
tenant_id = "your-tenant-uuid"
with rls_transaction(tenant_id):
# Build your queryset
qs = Finding.objects.filter(status="FAIL").order_by("-inserted_at")[:25]
# Force evaluation
list(qs)
# Get the SQL
print(connection.queries[-1]['sql'])
```
#### Option B: Print Query Without Execution
```python
from api.models import Finding
queryset = Finding.objects.filter(status="FAIL")
print(queryset.query)
```
> **Note:** This won't include RLS filters, so the actual production query will differ.
#### Option C: Enable SQL Logging
Set `DJANGO_LOGGING_LEVEL=DEBUG` in your environment:
```bash
DJANGO_LOGGING_LEVEL=DEBUG poetry run python manage.py runserver
```
### 2. Run EXPLAIN ANALYZE
Connect to PostgreSQL and run:
```sql
EXPLAIN ANALYZE <your_query>;
```
Or with more details:
```sql
EXPLAIN (ANALYZE, BUFFERS, FORMAT TEXT) <your_query>;
```
#### Running EXPLAIN with RLS Context
To test with RLS enabled (as it runs in production), set the tenant context first:
```sql
-- Set tenant context
SELECT set_config('api.tenant_id', 'your-tenant-uuid', TRUE);
-- Then run your EXPLAIN ANALYZE
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL' LIMIT 25;
```
### 3. Interpret the Results
#### Good Signs (Index is being used)
```
Index Scan using findings_tenant_status_idx on findings
Index Cond: ((tenant_id = '...'::uuid) AND (status = 'FAIL'))
Rows Removed by Filter: 0
Actual Rows: 150
Planning Time: 0.5 ms
Execution Time: 2.3 ms
```
#### Bad Signs (Sequential scan - no index)
```
Seq Scan on findings
Filter: ((tenant_id = '...'::uuid) AND (status = 'FAIL'))
Rows Removed by Filter: 999850
Actual Rows: 150
Planning Time: 0.3 ms
Execution Time: 450.2 ms
```
## Quick Reference: What to Look For
| What You See | Meaning | Action |
|--------------|---------|--------|
| `Index Scan` | Index is being used | Good, no action needed |
| `Index Only Scan` | Even better - data comes from index only | Good, no action needed |
| `Bitmap Index Scan` | Index used, results combined | Usually fine |
| `Seq Scan` on large tables | Full table scan, no index | **Needs investigation** |
| `Rows Removed by Filter: <high number>` | Fetching too many rows | **Query or index issue** |
| High `Execution Time` | Query is slow | **Needs optimization** |
## Common Issues and Fixes
### 1. Missing Index
**Problem:** `Seq Scan` on a filtered column
```sql
-- Bad: No index on status
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL';
-- Shows: Seq Scan on findings
```
**Fix:** Add an index
```python
# In your model
class Meta:
indexes = [
models.Index(fields=['status'], name='findings_status_idx'),
]
```
### 2. Index Not Used Due to Type Mismatch
**Problem:** Index exists but PostgreSQL doesn't use it
```sql
-- If tenant_id is UUID but you're passing a string without cast
WHERE tenant_id = 'some-uuid-string'
```
**Fix:** Ensure proper type casting in your queries
### 3. Index Not Used Due to Function Call
**Problem:** Wrapping column in a function prevents index usage
```sql
-- Bad: Index on inserted_at won't be used
WHERE DATE(inserted_at) = '2024-01-01'
-- Good: Use range instead
WHERE inserted_at >= '2024-01-01' AND inserted_at < '2024-01-02'
```
### 4. Wrong Index for Sorting
**Problem:** Query is sorted but index doesn't match sort order
```sql
-- If you have ORDER BY inserted_at DESC
-- You need an index with DESC or PostgreSQL will sort in memory
```
**Fix:** Create index with matching sort order
```python
class Meta:
indexes = [
models.Index(fields=['-inserted_at'], name='findings_inserted_desc_idx'),
]
```
### 5. Composite Index Column Order
**Problem:** Index exists but columns are in wrong order
```sql
-- Index on (tenant_id, scan_id)
-- This query WON'T use the index efficiently:
WHERE scan_id = '...'
-- This query WILL use the index:
WHERE tenant_id = '...' AND scan_id = '...'
```
**Rule:** The leftmost columns in a composite index must be in your WHERE clause.
## RLS (Row Level Security) Considerations
Prowler uses Row Level Security via PostgreSQL's `set_config`. When analyzing queries, remember:
1. RLS policies add implicit `WHERE tenant_id = current_tenant()` to queries
2. Always test with RLS enabled (how it runs in production)
3. Ensure `tenant_id` is the **first column** in composite indexes
### Using rls_transaction in Code
The `rls_transaction` context manager from `api.db_utils` sets the tenant context for all queries within its scope:
```python
from api.db_utils import rls_transaction
from api.models import Finding
with rls_transaction(tenant_id):
# All queries here will have RLS applied
qs = Finding.objects.filter(status="FAIL")
list(qs) # Execute
```
### Using RLS in Raw SQL (psql)
```sql
-- Set tenant context for the transaction
SELECT set_config('api.tenant_id', 'your-tenant-uuid', TRUE);
-- Now RLS policies will filter by this tenant
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL';
```
### Index Design for RLS
Since every query includes `tenant_id` via RLS, your composite indexes should **always start with `tenant_id`**:
```python
class Meta:
indexes = [
# Good: tenant_id first
models.Index(fields=['tenant_id', 'status', 'severity']),
# Bad: tenant_id not first - RLS queries won't use this efficiently
models.Index(fields=['status', 'tenant_id']),
]
```
## Test Data Requirements
The amount of test data you need depends on what you're testing. PostgreSQL's query planner considers table statistics, index definitions, and data distribution when choosing execution plans.
### Important Considerations
1. **Small datasets may not use indexes**: PostgreSQL may choose a sequential scan over an index scan if the table is small enough that scanning it directly is faster. This is expected behavior.
2. **Data must exist in the tables you're querying**: If your endpoint queries `findings`, `resources`, `scans`, or other tables, ensure those tables have data. Use the `findings` management command to generate test data:
```bash
cd api/src/backend
poetry run python manage.py findings \
--tenant <TENANT_ID> \
--findings 1000 \
--resources 500 \
--batch 500
```
3. **Update table statistics**: After inserting test data, run `ANALYZE` to update PostgreSQL's statistics:
```sql
ANALYZE findings;
ANALYZE resources;
ANALYZE scans;
-- Add other tables as needed
```
4. **Test with realistic data distribution**: If your query filters by a specific value (e.g., `status='FAIL'`), ensure your test data includes a realistic mix of values.
### When Index Usage Matters Most
Focus on validating index usage when:
- The table will have thousands or millions of rows in production
- The query is called frequently (list endpoints, dashboards)
- The query has multiple filters or joins
For small lookup tables or infrequently-called endpoints, sequential scans may be acceptable.
## Performance Checklist for PRs
Before submitting a PR that adds or modifies database queries:
- [ ] Profiled queries with Silk or `EXPLAIN ANALYZE`
- [ ] Verified indexes are being used (no unexpected `Seq Scan` on large tables)
- [ ] Checked `Rows Removed by Filter` is reasonable
- [ ] Tested with RLS enabled
- [ ] For critical endpoints: documented the query plan in the PR
## Useful Commands
### Update Table Statistics
```sql
ANALYZE findings;
ANALYZE resources;
```
### See Existing Indexes
```sql
SELECT indexname, indexdef
FROM pg_indexes
WHERE tablename = 'findings';
```
### See Index Usage Stats
```sql
SELECT
schemaname,
tablename,
indexname,
idx_scan,
idx_tup_read,
idx_tup_fetch
FROM pg_stat_user_indexes
WHERE tablename = 'findings'
ORDER BY idx_scan DESC;
```
### Check Table Size
```sql
SELECT
relname as table_name,
pg_size_pretty(pg_total_relation_size(relid)) as total_size
FROM pg_catalog.pg_statio_user_tables
WHERE relname IN ('findings', 'resources', 'scans')
ORDER BY pg_total_relation_size(relid) DESC;
```
## Working with Partitioned Tables
The `findings` and `resource_finding_mappings` tables are partitioned. When adding indexes, use the helper functions from `api.db_utils`:
### Adding Indexes to Partitions
```python
# In a migration file
from functools import partial
from django.db import migrations
from api.db_utils import create_index_on_partitions, drop_index_on_partitions
class Migration(migrations.Migration):
atomic = False # Required for CONCURRENTLY
dependencies = [
("api", "XXXX_previous_migration"),
]
operations = [
migrations.RunPython(
partial(
create_index_on_partitions,
parent_table="findings",
index_name="my_new_idx",
columns="tenant_id, status, severity",
all_partitions=False, # Only current/future partitions
),
reverse_code=partial(
drop_index_on_partitions,
parent_table="findings",
index_name="my_new_idx",
),
),
]
```
### Parameters
- `all_partitions=False` (default): Only creates indexes on current and future partitions. Use this for new indexes to avoid maintenance overhead on old data.
- `all_partitions=True`: Creates indexes on all partitions. Use when migrating critical existing indexes.
See [Partitions Documentation](./partitions.md) for more details on partitioning strategy.
## Further Reading
- [Django Silk Documentation](https://github.com/jazzband/django-silk)
- [PostgreSQL EXPLAIN Documentation](https://www.postgresql.org/docs/current/sql-explain.html)
- [Using EXPLAIN](https://www.postgresql.org/docs/current/using-explain.html)
- [Index Types in PostgreSQL](https://www.postgresql.org/docs/current/indexes-types.html)
- [Prowler Partitions Documentation](./partitions.md)

5
api/poetry.lock generated
View File

@@ -6065,7 +6065,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"},
@@ -6074,7 +6073,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"},
@@ -6083,7 +6081,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"},
@@ -6092,7 +6089,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"},
@@ -6101,7 +6097,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"},
{file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"},

View File

@@ -58,7 +58,7 @@ Before you begin, ensure you have:
### Authentication
Prowler supports multiple authentication methods for OCI. For detailed authentication setup, see the [OCI Authentication Guide](./authentication).
Prowler supports multiple authentication methods for OCI. For detailed authentication setup, see the [OCI Authentication Guide](./authentication.mdx).
**Note:** OCI Session Authentication and Config File Authentication both use the same `~/.oci/config` file. The difference is how the config file is generated - automatically via browser (session auth) or manually with API keys.
@@ -107,7 +107,7 @@ The easiest and most secure method is using OCI session authentication, which au
#### Alternative: Manual API Key Setup
If you prefer to manually generate API keys instead of using browser-based session authentication, see the detailed instructions in the [Authentication Guide](./authentication#config-file-authentication-manual-api-key-setup).
If you prefer to manually generate API keys instead of using browser-based session authentication, see the detailed instructions in the [Authentication Guide](./authentication.mdx#config-file-authentication-manual-api-key-setup).
**Note:** Both methods use the same `~/.oci/config` file - the difference is that manual setup uses static API keys while session authentication uses temporary session tokens.

View File

@@ -19,17 +19,12 @@ class CheckRemediation(MinimalSerializerMixin, BaseModel):
default=None,
description="Terraform code snippet with best practices for remediation",
)
nativeiac: str | None = Field(
default=None,
description="Native Infrastructure as Code code snippet with best practices for remediation",
recommendation_text: str | None = Field(
default=None, description="Text description with best practices"
)
other: str | None = Field(
recommendation_url: str | None = Field(
default=None,
description="Other remediation code snippet with best practices for remediation, usually used for web interfaces or other tools",
)
recommendation: str | None = Field(
default=None,
description="Text description with general best recommended practices to avoid the issue",
description="URL to external remediation documentation",
)
@@ -38,6 +33,9 @@ class CheckMetadata(MinimalSerializerMixin, BaseModel):
model_config = ConfigDict(frozen=True)
check_id: str = Field(
description="Unique provider identifier for the security check (e.g., 's3_bucket_public_access')",
)
title: str = Field(
description="Human-readable title of the security check",
)
@@ -61,9 +59,9 @@ class CheckMetadata(MinimalSerializerMixin, BaseModel):
default=None,
description="Remediation guidance including CLI commands and recommendations",
)
additional_urls: list[str] = Field(
default_factory=list,
description="List of additional URLs related to the check",
related_url: str | None = Field(
default=None,
description="URL to additional documentation or references",
)
categories: list[str] = Field(
default_factory=list,
@@ -81,23 +79,23 @@ class CheckMetadata(MinimalSerializerMixin, BaseModel):
recommendation = remediation_data.get("recommendation", {})
remediation = CheckRemediation(
cli=code["cli"],
terraform=code["terraform"],
nativeiac=code["nativeiac"],
other=code["other"],
recommendation=recommendation["text"],
cli=code.get("cli"),
terraform=code.get("terraform"),
recommendation_text=recommendation.get("text"),
recommendation_url=recommendation.get("url"),
)
return cls(
check_id=data["checkid"],
title=data["checktitle"],
description=data["description"],
provider=data["provider"],
risk=data["risk"],
risk=data.get("risk"),
service=data["servicename"],
resource_type=data["resourcetype"],
remediation=remediation,
additional_urls=data["additionalurls"],
categories=data["categories"],
related_url=data.get("relatedurl"),
categories=data.get("categories", []),
)
@@ -118,36 +116,35 @@ class SimplifiedFinding(MinimalSerializerMixin, BaseModel):
severity: Literal["critical", "high", "medium", "low", "informational"] = Field(
description="Severity level of the finding",
)
check_id: str = Field(
description="ID of the security check that generated this finding",
check_metadata: CheckMetadata = Field(
description="Metadata about the security check that generated this finding",
)
status_extended: str = Field(
description="Extended status information providing additional context",
)
delta: Literal["new", "changed"] | None = Field(
default=None,
delta: Literal["new", "changed"] = Field(
description="Change status: 'new' (not seen before), 'changed' (modified since last scan), or None (unchanged)",
)
muted: bool | None = Field(
default=None,
muted: bool = Field(
description="Whether this finding has been muted/suppressed by the user",
)
muted_reason: str | None = Field(
muted_reason: str = Field(
default=None,
description="Reason provided when muting this finding",
description="Reason provided when muting this finding (3-500 chars if muted)",
)
@classmethod
def from_api_response(cls, data: dict) -> "SimplifiedFinding":
"""Transform JSON:API finding response to simplified format."""
attributes = data["attributes"]
check_metadata = attributes["check_metadata"]
return cls(
id=data["id"],
uid=attributes["uid"],
status=attributes["status"],
severity=attributes["severity"],
check_id=attributes["check_metadata"]["checkid"],
check_metadata=CheckMetadata.from_api_response(check_metadata),
status_extended=attributes["status_extended"],
delta=attributes["delta"],
muted=attributes["muted"],
@@ -182,9 +179,6 @@ class DetailedFinding(SimplifiedFinding):
default_factory=list,
description="List of UUIDs for cloud resources associated with this finding",
)
check_metadata: CheckMetadata = Field(
description="Metadata about the security check that generated this finding",
)
@classmethod
def from_api_response(cls, data: dict) -> "DetailedFinding":
@@ -210,7 +204,6 @@ class DetailedFinding(SimplifiedFinding):
uid=attributes["uid"],
status=attributes["status"],
severity=attributes["severity"],
check_id=check_metadata["checkid"],
check_metadata=CheckMetadata.from_api_response(check_metadata),
status_extended=attributes.get("status_extended"),
delta=attributes.get("delta"),

View File

@@ -19,9 +19,9 @@ class FindingsTools(BaseTool):
"""Tools for security findings operations.
Provides tools for:
- search_security_findings: Fast and lightweight searching across findings
- get_finding_details: Get complete details for a specific finding
- get_findings_overview: Get aggregate statistics and trends across all findings
- Searching and filtering security findings
- Getting detailed finding information
- Viewing findings overview/statistics
"""
async def search_security_findings(
@@ -90,27 +90,27 @@ class FindingsTools(BaseTool):
) -> dict[str, Any]:
"""Search and filter security findings across all cloud providers with rich filtering capabilities.
IMPORTANT: This tool returns LIGHTWEIGHT findings. Use this for fast searching and filtering across many findings.
For complete details use prowler_app_get_finding_details on specific findings.
This is the primary tool for browsing and filtering security findings. Returns lightweight findings
optimized for searching across large result sets. For detailed information about a specific finding,
use get_finding_details.
Default behavior:
- Returns latest findings from most recent scans (no date parameters needed)
- Filters to FAIL status only (security issues found)
- Returns 50 results per page
- Returns 100 results per page
Date filtering:
- Without dates: queries findings from the most recent completed scan across all providers (most efficient)
- With dates: queries historical findings (2-day maximum range between date_from and date_to)
- Without dates: queries findings from the most recent completed scan across all providers (most efficient). This returns the latest snapshot of findings, not a time-based query.
- With dates: queries historical findings (2-day maximum range)
Each finding includes:
- Core identification: id (UUID for get_finding_details), uid, check_id
- Security context: status (FAIL/PASS/MANUAL), severity (critical/high/medium/low/informational)
- State tracking: delta (new/changed/unchanged), muted (boolean), muted_reason
- Extended details: status_extended with additional context
- Core identification: id, uid, check_id
- Security context: status, severity, check_metadata (title, description, remediation)
- State tracking: delta (new/changed), muted status
- Extended details: status_extended for additional context
Workflow:
1. Use this tool to search and filter findings by severity, status, provider, service, region, etc.
2. Use prowler_app_get_finding_details with the finding 'id' to get complete information about the finding
Returns:
Paginated list of simplified findings with total count and pagination metadata
"""
# Validate page_size parameter
self.api_client.validate_page_size(page_size)
@@ -185,39 +185,21 @@ class FindingsTools(BaseTool):
) -> dict[str, Any]:
"""Retrieve comprehensive details about a specific security finding by its ID.
IMPORTANT: This tool returns COMPLETE finding details.
Use this after finding a specific finding via prowler_app_search_security_findings
This tool provides MORE detailed information than search_security_findings. Use this when you need
to deeply analyze a specific finding or understand its complete context and history.
This tool provides ALL information that prowler_app_search_security_findings returns PLUS:
1. Check Metadata (information about the check script that generated the finding):
- title: Human-readable phrase used to summarize the check
- description: Detailed explanation of what the check validates and why it is important
- risk: What could happen if this check fails
- remediation: Complete remediation guidance including step-by-step instructions and code snippets with best practices to fix the issue:
* cli: Command-line commands to fix the issue
* terraform: Terraform code snippets with best practices
* nativeiac: Provider native Infrastructure as Code code snippets with best practices to fix the issue
* other: Other remediation code snippets with best practices, usually used for web interfaces or other tools
* recommendation: Text description with general best recommended practices to avoid the issue
- provider: Cloud provider (aws/azure/gcp/etc)
- service: Service name (s3/ec2/keyvault/etc)
- resource_type: Resource type being evaluated
- categories: Security categories this check belongs to
- additional_urls: List of additional URLs related to the check
2. Temporal Metadata:
- inserted_at: When this finding was first inserted into database
- updated_at: When this finding was last updated
- first_seen_at: When this finding was first detected across all scans
3. Relationships:
- scan_id: UUID of the scan that generated this finding
- resource_ids: List of UUIDs for cloud resources associated with this finding
Additional information compared to search_security_findings:
- Temporal metadata: when the finding was first seen, inserted, and last updated
- Scan relationship: ID of the scan that generated this finding
- Resource relationships: IDs of all cloud resources associated with this finding
Workflow:
1. Use prowler_app_search_security_findings to browse and filter findings
2. Use this tool with the finding 'id' to get remediation guidance and complete context
1. Use search_security_findings to browse and filter across many findings
2. Use get_finding_details to drill down into specific findings of interest
Returns:
dict containing detailed finding with comprehensive security metadata, temporal information,
and relationships to scans and resources
"""
params = {
# Return comprehensive fields including temporal metadata
@@ -243,31 +225,26 @@ class FindingsTools(BaseTool):
description="Filter statistics by cloud provider. Multiple values allowed. If empty, all providers are returned. For valid values, please refer to Prowler Hub/Prowler Documentation that you can also find in form of tools in this MCP Server.",
),
) -> dict[str, Any]:
"""Get aggregate statistics and trends about security findings as a markdown report.
"""Get high-level statistics about security findings formatted as a human-readable markdown report.
This tool provides a HIGH-LEVEL OVERVIEW without retrieving individual findings. Use this when you
need to understand the overall security posture, trends, or remediation progress across all findings.
Use this tool to get a quick overview of your security posture without retrieving individual findings.
Perfect for understanding trends, identifying areas of concern, and tracking improvements over time.
The markdown report includes:
The report includes:
- Summary statistics: total findings, fail/pass/muted counts with percentages
- Delta analysis: breakdown of new vs changed findings
- Trending information: how findings are evolving over time
1. Summary Statistics:
- Total number of findings
- Failed checks (security issues) with percentage
- Passed checks (no issues) with percentage
- Muted findings (user-suppressed) with percentage
Output format: Markdown-formatted report ready to present to users or include in documentation.
2. Delta Analysis (Change Tracking):
- New findings: never seen before in previous scans
* Broken down by: new failures, new passes, new muted
- Changed findings: status changed since last scan
* Broken down by: changed to fail, changed to pass, changed to muted
- Unchanged findings: same status as previous scan
Use cases:
- Quick security posture assessment
- Tracking remediation progress over time
- Identifying which providers have most issues
- Understanding finding trends (improving or degrading)
This helps answer questions like:
- "What's my overall security posture?"
- "How many critical security issues do I have?"
- "Are we improving or getting worse over time?"
- "How many new security issues appeared since last scan?"
Returns:
Dictionary with 'report' key containing markdown-formatted summary statistics
"""
params = {
# Return only LLM-relevant aggregate statistics

9
poetry.lock generated
View File

@@ -2923,8 +2923,6 @@ python-versions = "*"
groups = ["dev"]
files = [
{file = "jsonpath-ng-1.7.0.tar.gz", hash = "sha256:f6f5f7fd4e5ff79c785f1573b394043b39849fb2bb47bcead935d12b00beab3c"},
{file = "jsonpath_ng-1.7.0-py2-none-any.whl", hash = "sha256:898c93fc173f0c336784a3fa63d7434297544b7198124a68f9a3ef9597b0ae6e"},
{file = "jsonpath_ng-1.7.0-py3-none-any.whl", hash = "sha256:f3d7f9e848cba1b6da28c55b1c26ff915dc9e0b1ba7e752a53d6da8d5cbd00b6"},
]
[package.dependencies]
@@ -5515,7 +5513,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"},
@@ -5524,7 +5521,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"},
@@ -5533,7 +5529,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"},
@@ -5542,7 +5537,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"},
@@ -5551,7 +5545,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"},
{file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"},
@@ -6460,4 +6453,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">3.9.1,<3.13"
content-hash = "1559a8799915bf0372eef07396e1dc40802911ef07ae92997cd260d9fe596ba3"
content-hash = "433468987cb3c4499d094d90e9f8cc9062a25ce115fde991a4e1b39edbfb7815"

View File

@@ -11,11 +11,6 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `compute_instance_preemptible_vm_disabled` check for GCP provider [(#9342)](https://github.com/prowler-cloud/prowler/pull/9342)
- `compute_instance_automatic_restart_enabled` check for GCP provider [(#9271)](https://github.com/prowler-cloud/prowler/pull/9271)
- `compute_instance_deletion_protection_enabled` check for GCP provider [(#9358)](https://github.com/prowler-cloud/prowler/pull/9358)
- Update SOC2 - Azure with Processing Integrity requirements [(#9463)](https://github.com/prowler-cloud/prowler/pull/9463)
- Update SOC2 - GCP with Processing Integrity requirements [(#9464)](https://github.com/prowler-cloud/prowler/pull/9464)
- Update SOC2 - AWS with Processing Integrity requirements [(#9462)](https://github.com/prowler-cloud/prowler/pull/9462)
- RBI Cyber Security Framework compliance for Azure provider [(#8822)](https://github.com/prowler-cloud/prowler/pull/8822)
### Changed
- Update AWS Macie service metadata to new format [(#9265)](https://github.com/prowler-cloud/prowler/pull/9265)
@@ -28,19 +23,10 @@ All notable changes to the **Prowler SDK** are documented in this file.
---
## [v5.14.3] (Prowler UNRELEASED)
### Fixed
- Fix duplicate requirement IDs in ISO 27001:2013 AWS compliance framework by adding unique letter suffixes
- Removed incorrect threat-detection category from checks metadata [(#9489)](https://github.com/prowler-cloud/prowler/pull/9489)
---
## [v5.14.2] (Prowler 5.14.2)
## [v5.14.2] (Prowler UNRELEASED)
### Fixed
- Custom check folder metadata validation [(#9335)](https://github.com/prowler-cloud/prowler/pull/9335)
- Pin `alibabacloud-gateway-oss-util` to version 0.0.3 to address missing dependency [(#9487)](https://github.com/prowler-cloud/prowler/pull/9487)
---

File diff suppressed because it is too large Load Diff

View File

@@ -547,106 +547,6 @@
"cloudwatch_log_group_retention_policy_specific_days_enabled",
"kinesis_stream_data_retention_period"
]
},
{
"Id": "pi_1_2",
"Name": "PI1.2 System inputs are measured and recorded completely, accurately, and timely to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements policies and procedures over system inputs, including controls over completeness and accuracy, to result in products, services, and reporting to meet the entity's objectives. This includes defining accuracy targets, monitoring input quality, and creating detailed records of each input event.",
"Attributes": [
{
"ItemId": "pi_1_2",
"Section": "PI1.0 - Processing Integrity",
"Service": "aws",
"Type": "automated"
}
],
"Checks": [
"apigateway_restapi_logging_enabled",
"apigatewayv2_api_access_logging_enabled",
"elbv2_logging_enabled",
"elb_logging_enabled",
"wafv2_webacl_logging_enabled",
"waf_global_webacl_logging_enabled",
"cloudtrail_s3_dataevents_write_enabled",
"cloudfront_distributions_logging_enabled"
]
},
{
"Id": "pi_1_3",
"Name": "PI1.3 Data is processed completely, accurately, and timely as authorized to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure data is processed completely, accurately, and timely. This includes defining processing specifications, identifying processing activities, detecting and correcting errors throughout processing, recording processing activities with accurate logs, and ensuring completeness and timeliness of processing.",
"Attributes": [
{
"ItemId": "pi_1_3",
"Section": "PI1.0 - Processing Integrity",
"Service": "aws",
"Type": "automated"
}
],
"Checks": [
"cloudtrail_multi_region_enabled",
"cloudtrail_log_file_validation_enabled",
"cloudtrail_cloudwatch_logging_enabled",
"cloudwatch_log_metric_filter_unauthorized_api_calls",
"cloudwatch_log_metric_filter_authentication_failures",
"cloudwatch_log_metric_filter_policy_changes",
"cloudwatch_log_metric_filter_root_usage",
"config_recorder_all_regions_enabled",
"rds_instance_integration_cloudwatch_logs",
"rds_cluster_integration_cloudwatch_logs",
"glue_etl_jobs_logging_enabled",
"stepfunctions_statemachine_logging_enabled"
]
},
{
"Id": "pi_1_4",
"Name": "PI1.4 System outputs are complete, accurate, distributed only to intended parties, and retained to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure system outputs are delivered to authorized recipients in the correct format and protected against unauthorized access, modification, theft, destruction, or corruption. This includes output encryption, access controls, and audit trails for output delivery.",
"Attributes": [
{
"ItemId": "pi_1_4",
"Section": "PI1.0 - Processing Integrity",
"Service": "aws",
"Type": "automated"
}
],
"Checks": [
"s3_bucket_default_encryption",
"s3_bucket_kms_encryption",
"cloudwatch_log_group_kms_encryption_enabled",
"sns_topics_kms_encryption_at_rest_enabled",
"kinesis_stream_encrypted_at_rest",
"cloudfront_distributions_field_level_encryption_enabled",
"cloudwatch_log_group_not_publicly_accessible",
"cloudwatch_cross_account_sharing_disabled",
"glue_etl_jobs_cloudwatch_logs_encryption_enabled",
"glue_etl_jobs_amazon_s3_encryption_enabled"
]
},
{
"Id": "pi_1_5",
"Name": "PI1.5 Stored data is maintained complete, accurate, and protected from unauthorized modification to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to protect stored inputs, items in processing, and outputs from theft, destruction, corruption, or deterioration. This includes data encryption at rest, key management, backup and recovery procedures, access controls, and data integrity validation.",
"Attributes": [
{
"ItemId": "pi_1_5",
"Section": "PI1.0 - Processing Integrity",
"Service": "aws",
"Type": "automated"
}
],
"Checks": [
"s3_bucket_object_versioning",
"s3_bucket_object_lock",
"rds_instance_storage_encrypted",
"rds_cluster_storage_encrypted",
"dynamodb_tables_kms_cmk_encryption_enabled",
"ec2_ebs_volume_encryption",
"backup_plans_exist",
"backup_recovery_point_encrypted",
"backup_vaults_encrypted",
"kms_cmk_rotation_enabled"
]
}
]
}

View File

@@ -1,248 +0,0 @@
{
"Framework": "RBI-Cyber-Security-Framework",
"Name": "Reserve Bank of India (RBI) Cyber Security Framework",
"Version": "",
"Provider": "Azure",
"Description": "The Reserve Bank had prescribed a set of baseline cyber security controls for primary (Urban) cooperative banks (UCBs) in October 2018. On further examination, it has been decided to prescribe a comprehensive cyber security framework for the UCBs, as a graded approach, based on their digital depth and interconnectedness with the payment systems landscape, digital products offered by them and assessment of cyber security risk. The framework would mandate implementation of progressively stronger security measures based on the nature, variety and scale of digital product offerings of banks.",
"Requirements": [
{
"Id": "annex_i_1_1",
"Name": "Annex I (1.1)",
"Description": "UCBs should maintain an up-to-date business IT Asset Inventory Register containing the following fields, as a minimum: a) Details of the IT Asset (viz., hardware/software/network devices, key personnel, services, etc.), b. Details of systems where customer data are stored, c. Associated business applications, if any, d. Criticality of the IT asset (For example, High/Medium/Low).",
"Attributes": [
{
"ItemId": "annex_i_1_1",
"Service": "vm"
}
],
"Checks": [
"vm_ensure_using_approved_images",
"vm_ensure_using_managed_disks",
"vm_trusted_launch_enabled",
"aks_cluster_rbac_enabled",
"aks_clusters_created_with_private_nodes",
"appinsights_ensure_is_configured",
"containerregistry_admin_user_disabled"
]
},
{
"Id": "annex_i_1_3",
"Name": "Annex I (1.3)",
"Description": "Appropriately manage and provide protection within and outside UCB/network, keeping in mind how the data/information is stored, transmitted, processed, accessed and put to use within/outside the UCB's network, and level of risk they are exposed to depending on the sensitivity of the data/information.",
"Attributes": [
{
"ItemId": "annex_i_1_3",
"Service": "azure"
}
],
"Checks": [
"keyvault_key_rotation_enabled",
"keyvault_access_only_through_private_endpoints",
"keyvault_private_endpoints",
"keyvault_rbac_enabled",
"app_function_not_publicly_accessible",
"app_ensure_http_is_redirected_to_https",
"app_minimum_tls_version_12",
"storage_blob_public_access_level_is_disabled",
"storage_secure_transfer_required_is_enabled",
"storage_ensure_encryption_with_customer_managed_keys",
"storage_ensure_minimum_tls_version_12",
"storage_default_network_access_rule_is_denied",
"storage_ensure_private_endpoints_in_storage_accounts",
"network_ssh_internet_access_restricted",
"sqlserver_unrestricted_inbound_access",
"sqlserver_tde_encryption_enabled",
"sqlserver_tde_encrypted_with_cmk",
"cosmosdb_account_use_private_endpoints",
"cosmosdb_account_firewall_use_selected_networks",
"mysql_flexible_server_ssl_connection_enabled",
"mysql_flexible_server_minimum_tls_version_12",
"postgresql_flexible_server_enforce_ssl_enabled",
"aks_clusters_public_access_disabled",
"containerregistry_not_publicly_accessible",
"containerregistry_uses_private_link",
"aisearch_service_not_publicly_accessible"
]
},
{
"Id": "annex_i_5_1",
"Name": "Annex I (5.1)",
"Description": "The firewall configurations should be set to the highest security level and evaluation of critical device (such as firewall, network switches, security devices, etc.) configurations should be done periodically.",
"Attributes": [
{
"ItemId": "annex_i_5_1",
"Service": "network"
}
],
"Checks": [
"network_rdp_internet_access_restricted",
"network_http_internet_access_restricted",
"network_udp_internet_access_restricted",
"network_ssh_internet_access_restricted",
"network_flow_log_captured_sent",
"network_flow_log_more_than_90_days",
"network_watcher_enabled",
"network_bastion_host_exists",
"aks_network_policy_enabled",
"storage_default_network_access_rule_is_denied"
]
},
{
"Id": "annex_i_6",
"Name": "Annex I (6)",
"Description": "Put in place systems and processes to identify, track, manage and monitor the status of patches to servers, operating system and application software running at the systems used by the UCB officials (end-users). Implement and update antivirus protection for all servers and applicable end points preferably through a centralised system.",
"Attributes": [
{
"ItemId": "annex_i_6",
"Service": "defender"
}
],
"Checks": [
"defender_ensure_system_updates_are_applied",
"defender_assessments_vm_endpoint_protection_installed",
"defender_ensure_defender_for_server_is_on",
"defender_ensure_defender_for_app_services_is_on",
"defender_ensure_defender_for_sql_servers_is_on",
"defender_ensure_defender_for_azure_sql_databases_is_on",
"defender_ensure_defender_for_storage_is_on",
"defender_ensure_defender_for_containers_is_on",
"defender_ensure_defender_for_keyvault_is_on",
"defender_ensure_defender_for_arm_is_on",
"defender_ensure_defender_for_dns_is_on",
"defender_ensure_defender_for_databases_is_on",
"defender_ensure_defender_for_cosmosdb_is_on",
"defender_container_images_scan_enabled",
"defender_container_images_resolved_vulnerabilities",
"defender_auto_provisioning_vulnerabilty_assessments_machines_on",
"vm_backup_enabled",
"app_ensure_java_version_is_latest",
"app_ensure_php_version_is_latest",
"app_ensure_python_version_is_latest"
]
},
{
"Id": "annex_i_7_1",
"Name": "Annex I (7.1)",
"Description": "Disallow administrative rights on end-user workstations/PCs/laptops and provide access rights on a 'need to know' and 'need to do' basis.",
"Attributes": [
{
"ItemId": "annex_i_7_1",
"Service": "iam"
}
],
"Checks": [
"iam_role_user_access_admin_restricted",
"iam_subscription_roles_owner_custom_not_created",
"iam_custom_role_has_permissions_to_administer_resource_locks",
"entra_global_admin_in_less_than_five_users",
"entra_policy_ensure_default_user_cannot_create_apps",
"entra_policy_ensure_default_user_cannot_create_tenants",
"entra_policy_default_users_cannot_create_security_groups",
"entra_policy_guest_invite_only_for_admin_roles",
"entra_policy_guest_users_access_restrictions",
"app_function_identity_without_admin_privileges"
]
},
{
"Id": "annex_i_7_2",
"Name": "Annex I (7.2)",
"Description": "Passwords should be set as complex and lengthy and users should not use same passwords for all the applications/systems/devices.",
"Attributes": [
{
"ItemId": "annex_i_7_2",
"Service": "entra"
}
],
"Checks": [
"entra_non_privileged_user_has_mfa",
"entra_privileged_user_has_mfa",
"entra_policy_user_consent_for_verified_apps",
"entra_policy_restricts_user_consent_for_apps",
"entra_user_with_vm_access_has_mfa",
"entra_security_defaults_enabled",
"entra_conditional_access_policy_require_mfa_for_management_api",
"entra_trusted_named_locations_exists",
"sqlserver_azuread_administrator_enabled",
"postgresql_flexible_server_entra_id_authentication_enabled",
"cosmosdb_account_use_aad_and_rbac"
]
},
{
"Id": "annex_i_7_3",
"Name": "Annex I (7.3)",
"Description": "Remote Desktop Protocol (RDP) which allows others to access the computer remotely over a network or over the internet should be always disabled and should be enabled only with the approval of the authorised officer of the UCB. Logs for such remote access shall be enabled and monitored for suspicious activities.",
"Attributes": [
{
"ItemId": "annex_i_7_3",
"Service": "network"
}
],
"Checks": [
"network_rdp_internet_access_restricted",
"vm_jit_access_enabled",
"network_bastion_host_exists",
"vm_linux_enforce_ssh_authentication"
]
},
{
"Id": "annex_i_7_4",
"Name": "Annex I (7.4)",
"Description": "Implement appropriate (e.g. centralised) systems and controls to allow, manage, log and monitor privileged/super user/administrative access to critical systems (servers/databases, applications, network devices etc.)",
"Attributes": [
{
"ItemId": "annex_i_7_4",
"Service": "monitor"
}
],
"Checks": [
"monitor_alert_create_update_nsg",
"monitor_alert_delete_nsg",
"monitor_diagnostic_setting_with_appropriate_categories",
"monitor_diagnostic_settings_exists",
"monitor_alert_create_policy_assignment",
"monitor_alert_delete_policy_assignment",
"monitor_alert_create_update_security_solution",
"monitor_alert_delete_security_solution",
"monitor_alert_create_update_sqlserver_fr",
"monitor_alert_delete_sqlserver_fr",
"monitor_alert_create_update_public_ip_address_rule",
"monitor_alert_delete_public_ip_address_rule",
"monitor_alert_service_health_exists",
"monitor_storage_account_with_activity_logs_cmk_encrypted",
"monitor_storage_account_with_activity_logs_is_private",
"keyvault_logging_enabled",
"sqlserver_auditing_enabled",
"sqlserver_auditing_retention_90_days",
"app_http_logs_enabled",
"app_function_application_insights_enabled",
"defender_additional_email_configured_with_a_security_contact",
"defender_ensure_notify_alerts_severity_is_high",
"defender_ensure_notify_emails_to_owners",
"defender_ensure_mcas_is_enabled",
"defender_ensure_wdatp_is_enabled"
]
},
{
"Id": "annex_i_12",
"Name": "Annex I (12)",
"Description": "Take periodic back up of the important data and store this data 'off line' (i.e., transferring important files to a storage device that can be detached from a computer/system after copying all the files).",
"Attributes": [
{
"ItemId": "annex_i_12",
"Service": "azure"
}
],
"Checks": [
"vm_backup_enabled",
"vm_sufficient_daily_backup_retention_period",
"storage_ensure_file_shares_soft_delete_is_enabled",
"storage_blob_versioning_is_enabled",
"storage_ensure_soft_delete_is_enabled",
"storage_geo_redundant_enabled",
"keyvault_recoverable",
"sqlserver_vulnerability_assessment_enabled",
"sqlserver_va_periodic_recurring_scans_enabled"
]
}
]
}

View File

@@ -619,92 +619,6 @@
"sqlserver_auditing_retention_90_days",
"storage_ensure_soft_delete_is_enabled"
]
},
{
"Id": "pi_1_2",
"Name": "PI1.2 System inputs are measured and recorded completely, accurately, and timely to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements policies and procedures over system inputs, including controls over completeness and accuracy, to result in products, services, and reporting to meet the entity's objectives. This includes defining accuracy targets, monitoring input quality, and creating detailed records of each input event.",
"Attributes": [
{
"ItemId": "pi_1_2",
"Section": "PI1.0 - Processing Integrity",
"Service": "azure",
"Type": "automated"
}
],
"Checks": [
"app_http_logs_enabled",
"network_flow_log_captured_sent",
"keyvault_logging_enabled",
"monitor_diagnostic_settings_exists",
"sqlserver_auditing_enabled"
]
},
{
"Id": "pi_1_3",
"Name": "PI1.3 Data is processed completely, accurately, and timely as authorized to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure data is processed completely, accurately, and timely. This includes defining processing specifications, identifying processing activities, detecting and correcting errors throughout processing, recording processing activities with accurate logs, and ensuring completeness and timeliness of processing.",
"Attributes": [
{
"ItemId": "pi_1_3",
"Section": "PI1.0 - Processing Integrity",
"Service": "azure",
"Type": "automated"
}
],
"Checks": [
"monitor_diagnostic_setting_with_appropriate_categories",
"monitor_diagnostic_settings_exists",
"defender_auto_provisioning_log_analytics_agent_vms_on",
"mysql_flexible_server_audit_log_enabled",
"postgresql_flexible_server_log_checkpoints_on",
"postgresql_flexible_server_log_connections_on",
"postgresql_flexible_server_log_disconnections_on",
"network_flow_log_more_than_90_days"
]
},
{
"Id": "pi_1_4",
"Name": "PI1.4 System outputs are complete, accurate, distributed only to intended parties, and retained to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure system outputs are delivered to authorized recipients in the correct format and protected against unauthorized access, modification, theft, destruction, or corruption. This includes output encryption, access controls, and audit trails for output delivery.",
"Attributes": [
{
"ItemId": "pi_1_4",
"Section": "PI1.0 - Processing Integrity",
"Service": "azure",
"Type": "automated"
}
],
"Checks": [
"storage_ensure_encryption_with_customer_managed_keys",
"storage_infrastructure_encryption_is_enabled",
"monitor_storage_account_with_activity_logs_cmk_encrypted",
"monitor_storage_account_with_activity_logs_is_private",
"sqlserver_tde_encryption_enabled",
"sqlserver_tde_encrypted_with_cmk"
]
},
{
"Id": "pi_1_5",
"Name": "PI1.5 Stored data is maintained complete, accurate, and protected from unauthorized modification to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to protect stored inputs, items in processing, and outputs from theft, destruction, corruption, or deterioration. This includes data encryption at rest, key management, backup and recovery procedures, access controls, and data integrity validation.",
"Attributes": [
{
"ItemId": "pi_1_5",
"Section": "PI1.0 - Processing Integrity",
"Service": "azure",
"Type": "automated"
}
],
"Checks": [
"storage_ensure_encryption_with_customer_managed_keys",
"storage_infrastructure_encryption_is_enabled",
"storage_ensure_soft_delete_is_enabled",
"vm_ensure_attached_disks_encrypted_with_cmk",
"vm_ensure_unattached_disks_encrypted_with_cmk",
"keyvault_key_rotation_enabled",
"keyvault_recoverable"
]
}
]
}
}

View File

@@ -492,87 +492,6 @@
"Checks": [
"cloudstorage_bucket_log_retention_policy_lock"
]
},
{
"Id": "pi_1_2",
"Name": "PI1.2 System inputs are measured and recorded completely, accurately, and timely to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements policies and procedures over system inputs, including controls over completeness and accuracy, to result in products, services, and reporting to meet the entity's objectives. This includes defining accuracy targets, monitoring input quality, and creating detailed records of each input event.",
"Attributes": [
{
"ItemId": "pi_1_2",
"Section": "PI1.0 - Processing Integrity",
"Service": "gcp",
"Type": "automated"
}
],
"Checks": [
"compute_loadbalancer_logging_enabled",
"compute_subnet_flow_logs_enabled",
"logging_sink_created",
"iam_audit_logs_enabled"
]
},
{
"Id": "pi_1_3",
"Name": "PI1.3 Data is processed completely, accurately, and timely as authorized to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure data is processed completely, accurately, and timely. This includes defining processing specifications, identifying processing activities, detecting and correcting errors throughout processing, recording processing activities with accurate logs, and ensuring completeness and timeliness of processing.",
"Attributes": [
{
"ItemId": "pi_1_3",
"Section": "PI1.0 - Processing Integrity",
"Service": "gcp",
"Type": "automated"
}
],
"Checks": [
"logging_log_metric_filter_and_alert_for_audit_configuration_changes_enabled",
"logging_log_metric_filter_and_alert_for_project_ownership_changes_enabled",
"logging_log_metric_filter_and_alert_for_sql_instance_configuration_changes_enabled",
"cloudsql_instance_postgres_log_connections_flag",
"cloudsql_instance_postgres_log_disconnections_flag",
"cloudsql_instance_postgres_log_statement_flag",
"iam_audit_logs_enabled"
]
},
{
"Id": "pi_1_4",
"Name": "PI1.4 System outputs are complete, accurate, distributed only to intended parties, and retained to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure system outputs are delivered to authorized recipients in the correct format and protected against unauthorized access, modification, theft, destruction, or corruption. This includes output encryption, access controls, and audit trails for output delivery.",
"Attributes": [
{
"ItemId": "pi_1_4",
"Section": "PI1.0 - Processing Integrity",
"Service": "gcp",
"Type": "automated"
}
],
"Checks": [
"cloudstorage_bucket_uniform_bucket_level_access",
"bigquery_dataset_cmk_encryption",
"bigquery_table_cmk_encryption",
"compute_instance_confidential_computing_enabled",
"pubsub_topic_encryption_with_cmk"
]
},
{
"Id": "pi_1_5",
"Name": "PI1.5 Stored data is maintained complete, accurate, and protected from unauthorized modification to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to protect stored inputs, items in processing, and outputs from theft, destruction, corruption, or deterioration. This includes data encryption at rest, key management, backup and recovery procedures, access controls, and data integrity validation.",
"Attributes": [
{
"ItemId": "pi_1_5",
"Section": "PI1.0 - Processing Integrity",
"Service": "gcp",
"Type": "automated"
}
],
"Checks": [
"cloudstorage_bucket_log_retention_policy_lock",
"cloudsql_instance_automated_backups",
"compute_instance_encryption_with_csek_enabled",
"kms_key_rotation_enabled",
"dataproc_encrypted_with_cmks_disabled"
]
}
]
}
}

View File

@@ -29,7 +29,9 @@
"Url": "https://hub.prowler.com/check/apigateway_restapi_waf_acl_attached"
}
},
"Categories": [],
"Categories": [
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "",

View File

@@ -33,7 +33,7 @@
}
},
"Categories": [
"forensics-ready"
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -34,7 +34,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -35,7 +35,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -32,7 +32,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -29,7 +29,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -36,7 +36,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -34,7 +34,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -32,7 +32,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -32,7 +32,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -38,7 +38,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -33,7 +33,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -37,7 +37,8 @@
}
},
"Categories": [
"logging"
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -32,7 +32,8 @@
}
},
"Categories": [
"trust-boundaries"
"trust-boundaries",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],

View File

@@ -78,7 +78,6 @@ dependencies = [
"alibabacloud_ecs20140526==7.2.5",
"alibabacloud_sas20181203==6.1.0",
"alibabacloud_oss20190517==1.0.6",
"alibabacloud-gateway-oss-util==0.0.3",
"alibabacloud_actiontrail20200706==2.4.1",
"alibabacloud_cs20151215==6.1.0",
"alibabacloud-rds20140815==12.0.0",

View File

@@ -62,7 +62,7 @@ You are a code reviewer for the Prowler UI project. Analyze the full file conten
**RULES TO CHECK:**
1. React Imports: NO `import * as React` or `import React, {` → Use `import { useState }`
2. TypeScript: NO union types like `type X = "a" | "b"` → Use const-based: `const X = {...} as const`
3. Tailwind: NO `var()` or hex colors in className → Use Tailwind utilities and semantic color classes. Exception: `var()` is allowed when passing colors to chart/graph components that require CSS color strings (not Tailwind classes) for their APIs.
3. Tailwind: NO `var()` or hex colors in className → Use Tailwind utilities and semantic color classes.
4. cn(): Use for merging multiple classes or for conditionals (handles Tailwind conflicts with twMerge) → `cn(BUTTON_STYLES.base, BUTTON_STYLES.active, isLoading && "opacity-50")`
5. React 19: NO `useMemo`/`useCallback` without reason
6. Zod v4: Use `.min(1)` not `.nonempty()`, `z.email()` not `z.string().email()`. All inputs must be validated with Zod.

View File

@@ -24,13 +24,6 @@ All notable changes to the **Prowler UI** are documented in this file.
---
## [1.14.3] (Prowler Unreleased)
### 🐞 Fixed
- Show top failed requirements in compliance specific view for compliance without sections [(#9471)](https://github.com/prowler-cloud/prowler/pull/9471)
---
## [1.14.2] (Prowler v5.14.2)
### 🐞 Fixed

View File

@@ -267,10 +267,6 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
<h3 className="text-text-neutral-primary text-lg font-semibold">
Risk Plot
</h3>
<p className="text-text-neutral-tertiary mt-1 text-xs">
Threat Score is severity-weighted, not quantity-based. Higher
severity findings have greater impact on the score.
</p>
</div>
<div className="relative min-h-[400px] w-full flex-1">
@@ -302,9 +298,9 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
<YAxis
type="number"
dataKey="y"
name="Fail Findings"
name="Failed Findings"
label={{
value: "Fail Findings",
value: "Failed Findings",
angle: -90,
position: "left",
offset: 10,
@@ -342,7 +338,7 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
{/* Interactive Legend - below chart */}
<div className="mt-4 flex flex-col items-start gap-2">
<p className="text-text-neutral-tertiary pl-2 text-xs">
Click to filter by provider
Click to filter by provider.
</p>
<ChartLegend
items={providers.map((p) => ({
@@ -367,7 +363,7 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
{selectedPoint.name}
</h4>
<p className="text-text-neutral-tertiary text-xs">
Threat Score: {selectedPoint.x}% | Fail Findings:{" "}
Threat Score: {selectedPoint.x}% | Failed Findings:{" "}
{selectedPoint.y}
</p>
</div>

View File

@@ -7,6 +7,7 @@ import { getSeverityTrendsByTimeRange } from "@/actions/overview/severity-trends
import { LineChart } from "@/components/graphs/line-chart";
import { LineConfig, LineDataPoint } from "@/components/graphs/types";
import {
MUTED_COLOR,
SEVERITY_LEVELS,
SEVERITY_LINE_CONFIGS,
SeverityLevel,
@@ -39,9 +40,6 @@ export const FindingSeverityOverTime = ({
const params = new URLSearchParams();
params.set("filter[inserted_at]", point.date);
// Always filter by FAIL status since this chart shows failed findings
params.set("filter[status__in]", "FAIL");
// Add scan_ids filter
if (
point.scan_ids &&
@@ -99,6 +97,15 @@ export const FindingSeverityOverTime = ({
// Build line configurations from shared severity configs
const lines: LineConfig[] = [...SEVERITY_LINE_CONFIGS];
// Only add muted line if data contains it
if (data.some((item) => item.muted !== undefined)) {
lines.push({
dataKey: "muted",
color: MUTED_COLOR,
label: "Muted",
});
}
// Calculate x-axis interval based on data length to show all labels without overlap
const getXAxisInterval = (): number => {
const dataLength = data.length;

View File

@@ -195,7 +195,7 @@ const SSRComplianceContent = async ({
{ pass: 0, fail: 0, manual: 0 },
);
const accordionItems = mapper.toAccordionItems(data, scanId);
const topFailedResult = mapper.getTopFailedSections(data);
const topFailedSections = mapper.getTopFailedSections(data);
return (
<div className="flex flex-col gap-8">
@@ -205,10 +205,7 @@ const SSRComplianceContent = async ({
fail={totalRequirements.fail}
manual={totalRequirements.manual}
/>
<TopFailedSectionsCard
sections={topFailedResult.items}
dataType={topFailedResult.type}
/>
<TopFailedSectionsCard sections={topFailedSections} />
{/* <SectionsFailureRateCard categories={categoryHeatmapData} /> */}
</div>

View File

@@ -3,20 +3,14 @@
import { HorizontalBarChart } from "@/components/graphs/horizontal-bar-chart";
import { BarDataPoint } from "@/components/graphs/types";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/shadcn";
import {
FailedSection,
TOP_FAILED_DATA_TYPE,
TopFailedDataType,
} from "@/types/compliance";
import { FailedSection } from "@/types/compliance";
interface TopFailedSectionsCardProps {
sections: FailedSection[];
dataType?: TopFailedDataType;
}
export function TopFailedSectionsCard({
sections,
dataType = TOP_FAILED_DATA_TYPE.SECTIONS,
}: TopFailedSectionsCardProps) {
// Transform FailedSection[] to BarDataPoint[]
const total = sections.reduce((sum, section) => sum + section.total, 0);
@@ -28,18 +22,13 @@ export function TopFailedSectionsCard({
color: "var(--bg-fail-primary)",
}));
const title =
dataType === TOP_FAILED_DATA_TYPE.REQUIREMENTS
? "Top Failed Requirements"
: "Top Failed Sections";
return (
<Card
variant="base"
className="flex min-h-[372px] w-full flex-col sm:min-w-[500px]"
>
<CardHeader>
<CardTitle>{title}</CardTitle>
<CardTitle>Top Failed Sections</CardTitle>
</CardHeader>
<CardContent className="flex flex-1 items-center justify-start">
<HorizontalBarChart data={barData} />

View File

@@ -68,31 +68,10 @@ const CustomLineTooltip = ({
const typedPayload = payload as unknown as TooltipPayloadItem[];
// Filter payload if a line is selected or hovered
const filteredPayload = filterLine
const displayPayload = filterLine
? typedPayload.filter((item) => item.dataKey === filterLine)
: typedPayload;
// Sort by severity order: critical, high, medium, low, informational
const severityOrder = [
"critical",
"high",
"medium",
"low",
"informational",
] as const;
const displayPayload = [...filteredPayload].sort((a, b) => {
const aIndex = severityOrder.indexOf(
a.dataKey as (typeof severityOrder)[number],
);
const bIndex = severityOrder.indexOf(
b.dataKey as (typeof severityOrder)[number],
);
// Items not in severityOrder go to the end
if (aIndex === -1) return 1;
if (bIndex === -1) return -1;
return aIndex - bIndex;
});
if (displayPayload.length === 0) {
return null;
}
@@ -117,17 +96,12 @@ const CustomLineTooltip = ({
return (
<div key={item.dataKey} className="space-y-1">
<div className="flex items-center justify-between gap-4">
<div className="flex items-center gap-2">
<div
className="h-2 w-2 rounded-full"
style={{ backgroundColor: item.stroke }}
/>
<span className="text-text-neutral-secondary text-sm">
{item.name}
</span>
</div>
<span className="text-text-neutral-primary text-sm font-medium">
<div className="flex items-center gap-2">
<div
className="h-2 w-2 rounded-full"
style={{ backgroundColor: item.stroke }}
/>
<span className="text-text-neutral-primary text-sm">
{item.value}
</span>
</div>
@@ -286,7 +260,7 @@ export function LineChart({
<div className="mt-4 flex flex-col items-start gap-2">
<p className="text-text-neutral-tertiary pl-2 text-xs">
Click to filter by severity
Click to filter by severity.
</p>
<ChartLegend
items={legendItems}

View File

@@ -1,65 +1,14 @@
import {
Category,
CategoryData,
Control,
FailedSection,
Framework,
Requirement,
REQUIREMENT_STATUS,
RequirementItemData,
RequirementsData,
RequirementStatus,
TOP_FAILED_DATA_TYPE,
TopFailedDataType,
TopFailedResult,
} from "@/types/compliance";
// Type for the internal map used in getTopFailedSections
interface FailedSectionData {
total: number;
types: Record<string, number>;
}
/**
* Builds the TopFailedResult from the accumulated map data
*/
const buildTopFailedResult = (
map: Map<string, FailedSectionData>,
type: TopFailedDataType,
): TopFailedResult => ({
items: Array.from(map.entries())
.map(([name, data]): FailedSection => ({ name, ...data }))
.sort((a, b) => b.total - a.total)
.slice(0, 5),
type,
});
/**
* Checks if the framework uses a flat structure (requirements directly on framework)
* vs hierarchical structure (categories -> controls -> requirements)
*/
const hasFlatStructure = (frameworks: Framework[]): boolean =>
frameworks.some(
(framework) =>
(framework.requirements?.length ?? 0) > 0 &&
framework.categories.length === 0,
);
/**
* Increments the failed count for a given name in the map
*/
const incrementFailedCount = (
map: Map<string, FailedSectionData>,
name: string,
type: string,
): void => {
if (!map.has(name)) {
map.set(name, { total: 0, types: {} });
}
const data = map.get(name)!;
data.total += 1;
data.types[type] = (data.types[type] || 0) + 1;
};
export const updateCounters = (
target: { pass: number; fail: number; manual: number },
status: RequirementStatus,
@@ -75,45 +24,38 @@ export const updateCounters = (
export const getTopFailedSections = (
mappedData: Framework[],
): TopFailedResult => {
const failedSectionMap = new Map<string, FailedSectionData>();
): FailedSection[] => {
const failedSectionMap = new Map();
if (hasFlatStructure(mappedData)) {
// Handle flat structure: count failed requirements directly
mappedData.forEach((framework) => {
const directRequirements = framework.requirements ?? [];
directRequirements.forEach((requirement) => {
if (requirement.status === REQUIREMENT_STATUS.FAIL) {
const type =
typeof requirement.type === "string" ? requirement.type : "Fails";
incrementFailedCount(failedSectionMap, requirement.name, type);
}
});
});
return buildTopFailedResult(
failedSectionMap,
TOP_FAILED_DATA_TYPE.REQUIREMENTS,
);
}
// Handle hierarchical structure: count by category (section)
mappedData.forEach((framework) => {
framework.categories.forEach((category) => {
category.controls.forEach((control) => {
control.requirements.forEach((requirement) => {
if (requirement.status === REQUIREMENT_STATUS.FAIL) {
const type =
typeof requirement.type === "string" ? requirement.type : "Fails";
incrementFailedCount(failedSectionMap, category.name, type);
const sectionName = category.name;
if (!failedSectionMap.has(sectionName)) {
failedSectionMap.set(sectionName, { total: 0, types: {} });
}
const sectionData = failedSectionMap.get(sectionName);
sectionData.total += 1;
const type = requirement.type || "Fails";
sectionData.types[type as string] =
(sectionData.types[type as string] || 0) + 1;
}
});
});
});
});
return buildTopFailedResult(failedSectionMap, TOP_FAILED_DATA_TYPE.SECTIONS);
// Convert in descending order and slice top 5
return Array.from(failedSectionMap.entries())
.map(([name, data]) => ({ name, ...data }))
.sort((a, b) => b.total - a.total)
.slice(0, 5); // Top 5
};
export const calculateCategoryHeatmapData = (
@@ -204,9 +146,9 @@ export const findOrCreateFramework = (
};
export const findOrCreateCategory = (
categories: Category[],
categories: any[],
categoryName: string,
): Category => {
) => {
let category = categories.find((c) => c.name === categoryName);
if (!category) {
category = {
@@ -221,10 +163,7 @@ export const findOrCreateCategory = (
return category;
};
export const findOrCreateControl = (
controls: Control[],
controlLabel: string,
): Control => {
export const findOrCreateControl = (controls: any[], controlLabel: string) => {
let control = controls.find((c) => c.label === controlLabel);
if (!control) {
control = {
@@ -239,7 +178,7 @@ export const findOrCreateControl = (
return control;
};
export const calculateFrameworkCounters = (frameworks: Framework[]): void => {
export const calculateFrameworkCounters = (frameworks: Framework[]) => {
frameworks.forEach((framework) => {
// Reset framework counters
framework.pass = 0;
@@ -247,9 +186,9 @@ export const calculateFrameworkCounters = (frameworks: Framework[]): void => {
framework.manual = 0;
// Handle flat structure (requirements directly in framework)
const directRequirements = framework.requirements ?? [];
const directRequirements = (framework as any).requirements || [];
if (directRequirements.length > 0) {
directRequirements.forEach((requirement) => {
directRequirements.forEach((requirement: Requirement) => {
updateCounters(framework, requirement.status);
});
return;

View File

@@ -1,4 +1,4 @@
import { createElement, ReactNode } from "react";
import React from "react";
import { AWSWellArchitectedCustomDetails } from "@/components/compliance/compliance-custom-details/aws-well-architected-details";
import { C5CustomDetails } from "@/components/compliance/compliance-custom-details/c5-details";
@@ -14,10 +14,10 @@ import { AccordionItemProps } from "@/components/ui/accordion/Accordion";
import {
AttributesData,
CategoryData,
FailedSection,
Framework,
Requirement,
RequirementsData,
TopFailedResult,
} from "@/types/compliance";
import {
@@ -74,9 +74,9 @@ export interface ComplianceMapper {
data: Framework[],
scanId: string | undefined,
) => AccordionItemProps[];
getTopFailedSections: (mappedData: Framework[]) => TopFailedResult;
getTopFailedSections: (mappedData: Framework[]) => FailedSection[];
calculateCategoryHeatmapData: (complianceData: Framework[]) => CategoryData[];
getDetailsComponent: (requirement: Requirement) => ReactNode;
getDetailsComponent: (requirement: Requirement) => React.ReactNode;
}
const getDefaultMapper = (): ComplianceMapper => ({
@@ -86,7 +86,7 @@ const getDefaultMapper = (): ComplianceMapper => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(GenericCustomDetails, { requirement }),
React.createElement(GenericCustomDetails, { requirement }),
});
const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
@@ -97,7 +97,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(C5CustomDetails, { requirement }),
React.createElement(C5CustomDetails, { requirement }),
},
ENS: {
mapComplianceData: mapENSComplianceData,
@@ -106,7 +106,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(ENSCustomDetails, { requirement }),
React.createElement(ENSCustomDetails, { requirement }),
},
ISO27001: {
mapComplianceData: mapISOComplianceData,
@@ -115,7 +115,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(ISOCustomDetails, { requirement }),
React.createElement(ISOCustomDetails, { requirement }),
},
CIS: {
mapComplianceData: mapCISComplianceData,
@@ -124,7 +124,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(CISCustomDetails, { requirement }),
React.createElement(CISCustomDetails, { requirement }),
},
"AWS-Well-Architected-Framework-Security-Pillar": {
mapComplianceData: mapAWSWellArchitectedComplianceData,
@@ -133,7 +133,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(AWSWellArchitectedCustomDetails, { requirement }),
React.createElement(AWSWellArchitectedCustomDetails, { requirement }),
},
"AWS-Well-Architected-Framework-Reliability-Pillar": {
mapComplianceData: mapAWSWellArchitectedComplianceData,
@@ -142,7 +142,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(AWSWellArchitectedCustomDetails, { requirement }),
React.createElement(AWSWellArchitectedCustomDetails, { requirement }),
},
"KISA-ISMS-P": {
mapComplianceData: mapKISAComplianceData,
@@ -151,7 +151,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(KISACustomDetails, { requirement }),
React.createElement(KISACustomDetails, { requirement }),
},
"MITRE-ATTACK": {
mapComplianceData: mapMITREComplianceData,
@@ -159,7 +159,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
getTopFailedSections: getMITRETopFailedSections,
calculateCategoryHeatmapData: calculateMITRECategoryHeatmapData,
getDetailsComponent: (requirement: Requirement) =>
createElement(MITRECustomDetails, { requirement }),
React.createElement(MITRECustomDetails, { requirement }),
},
ProwlerThreatScore: {
mapComplianceData: mapThetaComplianceData,
@@ -168,7 +168,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (complianceData: Framework[]) =>
calculateCategoryHeatmapData(complianceData),
getDetailsComponent: (requirement: Requirement) =>
createElement(ThreatCustomDetails, { requirement }),
React.createElement(ThreatCustomDetails, { requirement }),
},
CCC: {
mapComplianceData: mapCCCComplianceData,
@@ -177,7 +177,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
createElement(CCCCustomDetails, { requirement }),
React.createElement(CCCCustomDetails, { requirement }),
},
});

View File

@@ -12,8 +12,6 @@ import {
REQUIREMENT_STATUS,
RequirementsData,
RequirementStatus,
TOP_FAILED_DATA_TYPE,
TopFailedResult,
} from "@/types/compliance";
import {
@@ -22,12 +20,6 @@ import {
findOrCreateFramework,
} from "./commons";
// Type for the internal map used in getTopFailedSections
interface FailedSectionData {
total: number;
types: Record<string, number>;
}
export const mapComplianceData = (
attributesData: AttributesData,
requirementsData: RequirementsData,
@@ -100,9 +92,9 @@ export const mapComplianceData = (
}) || [],
};
// Add requirement directly to framework (flat structure - no categories)
framework.requirements = framework.requirements ?? [];
framework.requirements.push(requirement);
// Add requirement directly to framework (store in a special property)
(framework as any).requirements = (framework as any).requirements || [];
(framework as any).requirements.push(requirement);
}
// Calculate counters using common helper (works with flat structure)
@@ -116,63 +108,63 @@ export const toAccordionItems = (
scanId: string | undefined,
): AccordionItemProps[] => {
return data.flatMap((framework) => {
const requirements = framework.requirements ?? [];
const requirements = (framework as any).requirements || [];
// Filter out requirements without metadata (can't be displayed in accordion)
const displayableRequirements = requirements.filter(
(requirement) => requirement.hasMetadata !== false,
(requirement: Requirement) => requirement.hasMetadata !== false,
);
return displayableRequirements.map((requirement, i) => {
const itemKey = `${framework.name}-req-${i}`;
return displayableRequirements.map(
(requirement: Requirement, i: number) => {
const itemKey = `${framework.name}-req-${i}`;
return {
key: itemKey,
title: (
<ComplianceAccordionRequirementTitle
type=""
name={requirement.name}
status={requirement.status as FindingStatus}
/>
),
content: (
<ClientAccordionContent
key={`content-${itemKey}`}
requirement={requirement}
scanId={scanId || ""}
framework={framework.name}
disableFindings={
requirement.check_ids.length === 0 && requirement.manual === 0
}
/>
),
items: [],
};
});
return {
key: itemKey,
title: (
<ComplianceAccordionRequirementTitle
type=""
name={requirement.name}
status={requirement.status as FindingStatus}
/>
),
content: (
<ClientAccordionContent
key={`content-${itemKey}`}
requirement={requirement}
scanId={scanId || ""}
framework={framework.name}
disableFindings={
requirement.check_ids.length === 0 && requirement.manual === 0
}
/>
),
items: [],
};
},
);
});
};
// Custom function for MITRE to get top failed sections grouped by tactics
export const getTopFailedSections = (
mappedData: Framework[],
): TopFailedResult => {
const failedSectionMap = new Map<string, FailedSectionData>();
): FailedSection[] => {
const failedSectionMap = new Map();
mappedData.forEach((framework) => {
const requirements = framework.requirements ?? [];
const requirements = (framework as any).requirements || [];
requirements.forEach((requirement) => {
requirements.forEach((requirement: Requirement) => {
if (requirement.status === REQUIREMENT_STATUS.FAIL) {
const tactics = Array.isArray(requirement.tactics)
? (requirement.tactics as string[])
: [];
const tactics = (requirement.tactics as string[]) || [];
tactics.forEach((tactic) => {
if (!failedSectionMap.has(tactic)) {
failedSectionMap.set(tactic, { total: 0, types: {} });
}
const sectionData = failedSectionMap.get(tactic)!;
const sectionData = failedSectionMap.get(tactic);
sectionData.total += 1;
const type = "Fails";
@@ -183,13 +175,10 @@ export const getTopFailedSections = (
});
// Convert in descending order and slice top 5
return {
items: Array.from(failedSectionMap.entries())
.map(([name, data]): FailedSection => ({ name, ...data }))
.sort((a, b) => b.total - a.total)
.slice(0, 5),
type: TOP_FAILED_DATA_TYPE.SECTIONS,
};
return Array.from(failedSectionMap.entries())
.map(([name, data]) => ({ name, ...data }))
.sort((a, b) => b.total - a.total)
.slice(0, 5); // Top 5
};
// Custom function for MITRE to calculate category heatmap data grouped by tactics
@@ -208,12 +197,10 @@ export const calculateCategoryHeatmapData = (
// Aggregate data by tactics
complianceData.forEach((framework) => {
const requirements = framework.requirements ?? [];
const requirements = (framework as any).requirements || [];
requirements.forEach((requirement) => {
const tactics = Array.isArray(requirement.tactics)
? (requirement.tactics as string[])
: [];
requirements.forEach((requirement: Requirement) => {
const tactics = (requirement.tactics as string[]) || [];
tactics.forEach((tactic) => {
const existing = tacticMap.get(tactic) || {

View File

@@ -68,27 +68,12 @@ export interface Framework {
fail: number;
manual: number;
categories: Category[];
// Optional: flat structure for frameworks like MITRE that don't have categories
requirements?: Requirement[];
}
export interface FailedSection {
name: string;
total: number;
types?: Record<string, number>;
}
export const TOP_FAILED_DATA_TYPE = {
SECTIONS: "sections",
REQUIREMENTS: "requirements",
} as const;
export type TopFailedDataType =
(typeof TOP_FAILED_DATA_TYPE)[keyof typeof TOP_FAILED_DATA_TYPE];
export interface TopFailedResult {
items: FailedSection[];
type: TopFailedDataType;
types?: { [key: string]: number };
}
export interface RequirementsTotals {
@@ -107,7 +92,7 @@ export interface ENSAttributesMetadata {
Nivel: string;
Dimensiones: string[];
ModoEjecucion: string;
Dependencias: unknown[];
Dependencias: any[];
}
export interface ISO27001AttributesMetadata {