Compare commits

...

10 Commits

Author SHA1 Message Date
Alan Buscaglia acee366e82 docs(api): add Silk profiling and improve test data guidance 2025-12-16 15:19:06 +01:00
Alan Buscaglia 47d66c9c4c docs(api): update guide with project-specific patterns
- Add rls_transaction usage examples
- Add DJANGO_LOGGING_LEVEL=DEBUG option
- Add RLS context for raw SQL (set_config)
- Add section on partitioned tables index creation
- Improve index design guidance for RLS
2025-12-09 12:47:48 +01:00
Alan Buscaglia 8d41941d22 docs(api): add query performance guide and PR checklist item
- Add query-performance-guide.md with EXPLAIN ANALYZE best practices
- Add checklist item in PR template for query performance validation
- Document common issues, fixes, and minimum test data requirements
2025-12-09 12:43:00 +01:00
Hugo Pereira Brito 962c64eae5 chore: execute tests for only needed aws services (#9468) 2025-12-09 11:06:07 +01:00
César Arroba 7b56f0640f chore(github): fix release messages (#9459) 2025-12-09 10:06:55 +01:00
Alan Buscaglia 49c75cc418 fix(ui): add default date_from filter for severity over time endpoint (#9472) 2025-12-05 17:55:04 +01:00
Alan Buscaglia 56bca7c104 feat(ui): implement Risk Plot component with interactive legend and navigation (#9469) 2025-12-05 14:03:58 +01:00
Rubén De la Torre Vico faaa172b86 chore(aws): enhance metadata for macie service (#9265)
Co-authored-by: HugoPBrito <hugopbrit@gmail.com>
Co-authored-by: Hugo Pereira Brito <101209179+HugoPBrito@users.noreply.github.com>
2025-12-05 12:03:13 +01:00
Alan Buscaglia 219ce0ba89 feat(ui): add navigation progress bar for better UX during page transitions (#9465) 2025-12-05 12:01:00 +01:00
Adrián Peña 2170e5fe12 feat(api): add findings severity timeseries endpoint (#9363) 2025-12-05 11:19:37 +01:00
47 changed files with 2686 additions and 576 deletions
+1
View File
@@ -32,6 +32,7 @@ Please add a detailed description of how to review this PR.
#### API
- [ ] Verify if API specs need to be regenerated.
- [ ] Check if version updates are required (e.g., specs, Poetry, etc.).
- [ ] Query performance validated with `EXPLAIN ANALYZE` for new/modified endpoints. See [Query Performance Guide](https://github.com/prowler-cloud/prowler/blob/master/api/docs/query-performance-guide.md).
- [ ] Ensure new entries are added to [CHANGELOG.md](https://github.com/prowler-cloud/prowler/blob/master/api/CHANGELOG.md), if applicable.
### License
+61 -33
View File
@@ -48,8 +48,34 @@ jobs:
id: set-short-sha
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
container-build-push:
notify-release-started:
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: setup
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: API
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
container-build-push:
needs: [setup, notify-release-started]
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
runs-on: ${{ matrix.runner }}
strategy:
matrix:
@@ -78,21 +104,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Notify container push started
id: slack-notification-started
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: API
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
- name: Build and push API container for ${{ matrix.arch }}
id: container-push
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
@@ -106,23 +117,6 @@ jobs:
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
- name: Notify container push completed
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
COMPONENT: API
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.container-push.outcome }}
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
# Create and push multi-architecture manifest
create-manifest:
needs: [setup, container-build-push]
@@ -169,6 +163,40 @@ jobs:
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
echo "Cleanup completed"
notify-release-completed:
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: [setup, notify-release-started, container-build-push, create-manifest]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
run: |
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
echo "outcome=success" >> $GITHUB_OUTPUT
else
echo "outcome=failure" >> $GITHUB_OUTPUT
fi
- name: Notify container push completed
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
COMPONENT: API
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.outcome.outputs.outcome }}
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
trigger-deployment:
if: github.event_name == 'push'
needs: [setup, container-build-push]
+61 -33
View File
@@ -47,8 +47,34 @@ jobs:
id: set-short-sha
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
container-build-push:
notify-release-started:
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: setup
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: MCP
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
container-build-push:
needs: [setup, notify-release-started]
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
runs-on: ${{ matrix.runner }}
strategy:
matrix:
@@ -76,21 +102,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Notify container push started
id: slack-notification-started
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: MCP
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
- name: Build and push MCP container for ${{ matrix.arch }}
id: container-push
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
@@ -112,23 +123,6 @@ jobs:
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
- name: Notify container push completed
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
COMPONENT: MCP
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.container-push.outcome }}
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
# Create and push multi-architecture manifest
create-manifest:
needs: [setup, container-build-push]
@@ -175,6 +169,40 @@ jobs:
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
echo "Cleanup completed"
notify-release-completed:
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: [setup, notify-release-started, container-build-push, create-manifest]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
run: |
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
echo "outcome=success" >> $GITHUB_OUTPUT
else
echo "outcome=failure" >> $GITHUB_OUTPUT
fi
- name: Notify container push completed
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
COMPONENT: MCP
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.outcome.outputs.outcome }}
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
trigger-deployment:
if: github.event_name == 'push'
needs: [setup, container-build-push]
+104 -78
View File
@@ -50,30 +50,15 @@ env:
AWS_REGION: us-east-1
jobs:
container-build-push:
setup:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ${{ matrix.runner }}
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
arch: amd64
- platform: linux/arm64
runner: ubuntu-24.04-arm
arch: arm64
timeout-minutes: 45
permissions:
contents: read
packages: write
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
prowler_version: ${{ steps.get-prowler-version.outputs.prowler_version }}
prowler_version_major: ${{ steps.get-prowler-version.outputs.prowler_version_major }}
latest_tag: ${{ steps.get-prowler-version.outputs.latest_tag }}
stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
env:
POETRY_VIRTUALENVS_CREATE: 'false'
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
@@ -93,32 +78,24 @@ jobs:
run: |
PROWLER_VERSION="$(poetry version -s 2>/dev/null)"
echo "prowler_version=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"
echo "PROWLER_VERSION=${PROWLER_VERSION}" >> "${GITHUB_ENV}"
# Extract major version
PROWLER_VERSION_MAJOR="${PROWLER_VERSION%%.*}"
echo "prowler_version_major=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_OUTPUT}"
echo "PROWLER_VERSION_MAJOR=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_ENV}"
# Set version-specific tags
case ${PROWLER_VERSION_MAJOR} in
3)
echo "LATEST_TAG=v3-latest" >> "${GITHUB_ENV}"
echo "STABLE_TAG=v3-stable" >> "${GITHUB_ENV}"
echo "latest_tag=v3-latest" >> "${GITHUB_OUTPUT}"
echo "stable_tag=v3-stable" >> "${GITHUB_OUTPUT}"
echo "✓ Prowler v3 detected - tags: v3-latest, v3-stable"
;;
4)
echo "LATEST_TAG=v4-latest" >> "${GITHUB_ENV}"
echo "STABLE_TAG=v4-stable" >> "${GITHUB_ENV}"
echo "latest_tag=v4-latest" >> "${GITHUB_OUTPUT}"
echo "stable_tag=v4-stable" >> "${GITHUB_OUTPUT}"
echo "✓ Prowler v4 detected - tags: v4-latest, v4-stable"
;;
5)
echo "LATEST_TAG=latest" >> "${GITHUB_ENV}"
echo "STABLE_TAG=stable" >> "${GITHUB_ENV}"
echo "latest_tag=latest" >> "${GITHUB_OUTPUT}"
echo "stable_tag=stable" >> "${GITHUB_OUTPUT}"
echo "✓ Prowler v5 detected - tags: latest, stable"
@@ -129,6 +106,53 @@ jobs:
;;
esac
notify-release-started:
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: setup
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: SDK
RELEASE_TAG: ${{ needs.setup.outputs.prowler_version }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
container-build-push:
needs: [setup, notify-release-started]
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
runs-on: ${{ matrix.runner }}
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
arch: amd64
- platform: linux/arm64
runner: ubuntu-24.04-arm
arch: arm64
timeout-minutes: 45
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Login to DockerHub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
@@ -147,21 +171,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Notify container push started
id: slack-notification-started
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: SDK
RELEASE_TAG: ${{ env.PROWLER_VERSION }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
- name: Build and push SDK container for ${{ matrix.arch }}
id: container-push
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
@@ -172,30 +181,13 @@ jobs:
push: true
platforms: ${{ matrix.platform }}
tags: |
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}-${{ matrix.arch }}
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-${{ matrix.arch }}
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
- name: Notify container push completed
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
COMPONENT: SDK
RELEASE_TAG: ${{ env.PROWLER_VERSION }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.container-push.outcome }}
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
# Create and push multi-architecture manifest
create-manifest:
needs: [container-build-push]
needs: [setup, container-build-push]
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
@@ -222,24 +214,24 @@ jobs:
if: github.event_name == 'push'
run: |
docker buildx imagetools create \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64 \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64
- name: Create and push manifests for release event
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
run: |
docker buildx imagetools create \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.prowler_version }} \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.stable_tag }} \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.prowler_version }} \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.stable_tag }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.prowler_version }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.stable_tag }} \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.prowler_version }} \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.stable_tag }} \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64 \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64
- name: Install regctl
if: always()
@@ -249,13 +241,47 @@ jobs:
if: always()
run: |
echo "Cleaning up intermediate tags..."
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64" || true
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64" || true
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64" || true
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64" || true
echo "Cleanup completed"
notify-release-completed:
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: [setup, notify-release-started, container-build-push, create-manifest]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
run: |
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
echo "outcome=success" >> $GITHUB_OUTPUT
else
echo "outcome=failure" >> $GITHUB_OUTPUT
fi
- name: Notify container push completed
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
COMPONENT: SDK
RELEASE_TAG: ${{ needs.setup.outputs.prowler_version }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.outcome.outputs.outcome }}
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
dispatch-v3-deployment:
if: needs.container-build-push.outputs.prowler_version_major == '3'
needs: container-build-push
if: needs.setup.outputs.prowler_version_major == '3'
needs: [setup, container-build-push]
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
@@ -282,4 +308,4 @@ jobs:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
event-type: dispatch
client-payload: '{"version":"release","tag":"${{ needs.container-build-push.outputs.prowler_version }}"}'
client-payload: '{"version":"release","tag":"${{ needs.setup.outputs.prowler_version }}"}'
+102 -1
View File
@@ -82,9 +82,110 @@ jobs:
./tests/**/aws/**
./poetry.lock
- name: Resolve AWS services under test
if: steps.changed-aws.outputs.any_changed == 'true'
id: aws-services
shell: bash
run: |
python3 <<'PY'
import os
from pathlib import Path
dependents = {
"acm": ["elb"],
"autoscaling": ["dynamodb"],
"awslambda": ["ec2", "inspector2"],
"backup": ["dynamodb", "ec2", "rds"],
"cloudfront": ["shield"],
"cloudtrail": ["awslambda", "cloudwatch"],
"cloudwatch": ["bedrock"],
"ec2": ["dlm", "dms", "elbv2", "emr", "inspector2", "rds", "redshift", "route53", "shield", "ssm"],
"ecr": ["inspector2"],
"elb": ["shield"],
"elbv2": ["shield"],
"globalaccelerator": ["shield"],
"iam": ["bedrock", "cloudtrail", "cloudwatch", "codebuild"],
"kafka": ["firehose"],
"kinesis": ["firehose"],
"kms": ["kafka"],
"organizations": ["iam", "servicecatalog"],
"route53": ["shield"],
"s3": ["bedrock", "cloudfront", "cloudtrail", "macie"],
"ssm": ["ec2"],
"vpc": ["awslambda", "ec2", "efs", "elasticache", "neptune", "networkfirewall", "rds", "redshift", "workspaces"],
"waf": ["elbv2"],
"wafv2": ["cognito", "elbv2"],
}
changed_raw = """${{ steps.changed-aws.outputs.all_changed_files }}"""
# all_changed_files is space-separated, not newline-separated
# Strip leading "./" if present for consistent path handling
changed_files = [Path(f.lstrip("./")) for f in changed_raw.split() if f]
services = set()
run_all = False
for path in changed_files:
path_str = path.as_posix()
parts = path.parts
if path_str.startswith("prowler/providers/aws/services/"):
if len(parts) > 4 and "." not in parts[4]:
services.add(parts[4])
else:
run_all = True
elif path_str.startswith("tests/providers/aws/services/"):
if len(parts) > 4 and "." not in parts[4]:
services.add(parts[4])
else:
run_all = True
elif path_str.startswith("prowler/providers/aws/") or path_str.startswith("tests/providers/aws/"):
run_all = True
# Expand with direct dependent services (one level only)
# We only test services that directly depend on the changed services,
# not transitive dependencies (services that depend on dependents)
original_services = set(services)
for svc in original_services:
for dep in dependents.get(svc, []):
services.add(dep)
if run_all or not services:
run_all = True
services = set()
service_paths = " ".join(sorted(f"tests/providers/aws/services/{svc}" for svc in services))
output_lines = [
f"run_all={'true' if run_all else 'false'}",
f"services={' '.join(sorted(services))}",
f"service_paths={service_paths}",
]
with open(os.environ["GITHUB_OUTPUT"], "a") as gh_out:
for line in output_lines:
gh_out.write(line + "\n")
print(f"AWS changed files (filtered): {changed_raw or 'none'}")
print(f"Run all AWS tests: {run_all}")
if services:
print(f"AWS service test paths: {service_paths}")
else:
print("AWS service test paths: none detected")
PY
- name: Run AWS tests
if: steps.changed-aws.outputs.any_changed == 'true'
run: poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
run: |
echo "AWS run_all=${{ steps.aws-services.outputs.run_all }}"
echo "AWS service_paths='${{ steps.aws-services.outputs.service_paths }}'"
if [ "${{ steps.aws-services.outputs.run_all }}" = "true" ]; then
poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
elif [ -z "${{ steps.aws-services.outputs.service_paths }}" ]; then
echo "No AWS service paths detected; skipping AWS tests."
else
poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml ${{ steps.aws-services.outputs.service_paths }}
fi
- name: Upload AWS coverage to Codecov
if: steps.changed-aws.outputs.any_changed == 'true'
+61 -33
View File
@@ -50,8 +50,34 @@ jobs:
id: set-short-sha
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
container-build-push:
notify-release-started:
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: setup
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: UI
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
container-build-push:
needs: [setup, notify-release-started]
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
runs-on: ${{ matrix.runner }}
strategy:
matrix:
@@ -80,21 +106,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Notify container push started
id: slack-notification-started
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: UI
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
- name: Build and push UI container for ${{ matrix.arch }}
id: container-push
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
@@ -111,23 +122,6 @@ jobs:
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
- name: Notify container push completed
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
COMPONENT: UI
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.container-push.outcome }}
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
# Create and push multi-architecture manifest
create-manifest:
needs: [setup, container-build-push]
@@ -174,6 +168,40 @@ jobs:
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
echo "Cleanup completed"
notify-release-completed:
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: [setup, notify-release-started, container-build-push, create-manifest]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
run: |
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
echo "outcome=success" >> $GITHUB_OUTPUT
else
echo "outcome=failure" >> $GITHUB_OUTPUT
fi
- name: Notify container push completed
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
COMPONENT: UI
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.outcome.outputs.outcome }}
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
trigger-deployment:
if: github.event_name == 'push'
needs: [setup, container-build-push]
+1
View File
@@ -6,6 +6,7 @@ All notable changes to the **Prowler API** are documented in this file.
### Added
- New endpoint to retrieve an overview of the attack surfaces [(#9309)](https://github.com/prowler-cloud/prowler/pull/9309)
- New endpoint `GET /api/v1/overviews/findings_severity/timeseries` to retrieve daily aggregated findings by severity level [(#9363)](https://github.com/prowler-cloud/prowler/pull/9363)
- Lighthouse AI support for Amazon Bedrock API key [(#9343)](https://github.com/prowler-cloud/prowler/pull/9343)
- Exception handler for provider deletions during scans [(#9414)](https://github.com/prowler-cloud/prowler/pull/9414)
- Support to use admin credentials through the read replica database [(#9440)](https://github.com/prowler-cloud/prowler/pull/9440)
+457
View File
@@ -0,0 +1,457 @@
# Query Performance Guide
## Overview
This guide explains how to validate query performance when developing new endpoints or modifying existing ones. **This is part of the development process**, not a separate task—just like writing unit tests.
The goal is simple: ensure PostgreSQL uses indexes correctly for the queries your code generates.
## When to Validate
You **must** validate query performance when:
- Creating a new endpoint that queries the database
- Modifying an existing query (adding filters, joins, or sorting)
- Adding new indexes
- Working on performance-critical endpoints (overviews, findings, resources)
## Profiling with Django Silk (Recommended)
[Django Silk](https://github.com/jazzband/django-silk) is the recommended way to profile queries because it captures the actual SQL generated by your code during real HTTP requests. This gives you the most accurate picture of what happens in production.
### Enabling Silk
Silk is installed as a dev dependency but disabled by default. To enable it temporarily for profiling:
#### 1. Add Silk to your local settings
In `api/src/backend/config/django/devel.py`, add at the end of the file:
```python
# Silk profiler (temporary - remove after profiling)
INSTALLED_APPS += ["silk"] # noqa: F405
MIDDLEWARE += ["silk.middleware.SilkyMiddleware"] # noqa: F405
```
#### 2. Add Silk URLs
In `api/src/backend/api/v1/urls.py`, add at the end:
```python
from django.conf import settings
from django.urls import include, path

if settings.DEBUG:
    urlpatterns += [path("silk/", include("silk.urls", namespace="silk"))]
```
#### 3. Run Silk migrations
```bash
cd api/src/backend
poetry run python manage.py migrate --database admin
```
#### 4. Access Silk
Start the development server and navigate to `http://localhost:8000/api/v1/silk/`.
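A typical way to start the server for profiling (a sketch of the standard Django invocation; your local setup, e.g. Docker Compose, may differ):
```bash
cd api/src/backend
poetry run python manage.py runserver
```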
### Using Silk
1. Make requests to the endpoint you want to profile
2. Open Silk UI and find your request
3. Click on the request to see all SQL queries executed
4. For each query, you can see:
- Execution time
- Number of similar queries (N+1 detection; see the sketch after this list)
- The actual SQL with parameters
- **EXPLAIN output** (click on a query to see it)
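When Silk flags a run of similar queries, the usual remedy is to fetch the related rows up front. A minimal sketch, assuming `Finding` has a `scan` foreign key (adjust to the real schema):
```python
from api.models import Finding

# One JOINed query instead of 1 + N separate queries
findings = Finding.objects.select_related("scan").filter(status="FAIL")[:25]
for finding in findings:
    print(finding.scan.name)  # no extra query per row
```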
### Disabling Silk
After profiling, **remove the changes** you made to `devel.py` and `urls.py`. Don't commit Silk configuration to the repository.
## Manual Query Analysis with EXPLAIN ANALYZE
For quick checks or when you need more control, you can run `EXPLAIN ANALYZE` directly.
### 1. Get Your Query
#### Option A: Using Django Shell with RLS
This approach mirrors how queries run in production with Row Level Security enabled:
```bash
cd api/src/backend
poetry run python manage.py shell
```
```python
from django.db import connection
from api.db_utils import rls_transaction
from api.models import Finding
tenant_id = "your-tenant-uuid"
with rls_transaction(tenant_id):
    # Build your queryset
    qs = Finding.objects.filter(status="FAIL").order_by("-inserted_at")[:25]
    # Force evaluation
    list(qs)
    # Get the SQL
    print(connection.queries[-1]['sql'])
```
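> **Note:** Django only records `connection.queries` when `DEBUG=True`; if the list comes back empty, check your settings.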
#### Option B: Print Query Without Execution
```python
from api.models import Finding
queryset = Finding.objects.filter(status="FAIL")
print(queryset.query)
```
> **Note:** This won't include RLS filters, so the actual production query will differ.
#### Option C: Enable SQL Logging
Set `DJANGO_LOGGING_LEVEL=DEBUG` in your environment:
```bash
DJANGO_LOGGING_LEVEL=DEBUG poetry run python manage.py runserver
```
### 2. Run EXPLAIN ANALYZE
Connect to PostgreSQL and run:
```sql
EXPLAIN ANALYZE <your_query>;
```
Or with more details:
```sql
EXPLAIN (ANALYZE, BUFFERS, FORMAT TEXT) <your_query>;
```
#### Running EXPLAIN with RLS Context
To test with RLS enabled (as it runs in production), set the tenant context first:
```sql
-- set_config(..., TRUE) is transaction-local, so run both statements
-- inside one explicit transaction (psql autocommits otherwise)
BEGIN;
SELECT set_config('api.tenant_id', 'your-tenant-uuid', TRUE);
-- Then run your EXPLAIN ANALYZE
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL' LIMIT 25;
COMMIT;
```
### 3. Interpret the Results
#### Good Signs (Index is being used)
```
Index Scan using findings_tenant_status_idx on findings
Index Cond: ((tenant_id = '...'::uuid) AND (status = 'FAIL'))
Rows Removed by Filter: 0
Actual Rows: 150
Planning Time: 0.5 ms
Execution Time: 2.3 ms
```
#### Bad Signs (Sequential scan - no index)
```
Seq Scan on findings
Filter: ((tenant_id = '...'::uuid) AND (status = 'FAIL'))
Rows Removed by Filter: 999850
Actual Rows: 150
Planning Time: 0.3 ms
Execution Time: 450.2 ms
```
## Quick Reference: What to Look For
| What You See | Meaning | Action |
|--------------|---------|--------|
| `Index Scan` | Index is being used | Good, no action needed |
| `Index Only Scan` | Even better - data comes from index only | Good, no action needed |
| `Bitmap Index Scan` | Index used, results combined | Usually fine |
| `Seq Scan` on large tables | Full table scan, no index | **Needs investigation** |
| `Rows Removed by Filter: <high number>` | Fetching too many rows | **Query or index issue** |
| High `Execution Time` | Query is slow | **Needs optimization** |
## Common Issues and Fixes
### 1. Missing Index
**Problem:** `Seq Scan` on a filtered column
```sql
-- Bad: No index on status
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL';
-- Shows: Seq Scan on findings
```
**Fix:** Add an index
```python
# In your model
class Meta:
    indexes = [
        models.Index(fields=['status'], name='findings_status_idx'),
    ]
```
### 2. Index Not Used Due to Type Mismatch
**Problem:** Index exists but PostgreSQL doesn't use it
```sql
-- If tenant_id is UUID but you're passing a string without cast
WHERE tenant_id = 'some-uuid-string'
```
**Fix:** Ensure proper type casting in your queries
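For example, an explicit cast lets the planner compare against the UUID index directly:
```sql
-- Good: cast the literal so the comparison is uuid = uuid
WHERE tenant_id = 'some-uuid-string'::uuid
```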
### 3. Index Not Used Due to Function Call
**Problem:** Wrapping column in a function prevents index usage
```sql
-- Bad: Index on inserted_at won't be used
WHERE DATE(inserted_at) = '2024-01-01'
-- Good: Use range instead
WHERE inserted_at >= '2024-01-01' AND inserted_at < '2024-01-02'
```
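The same idea through the ORM (a sketch; an `inserted_at__date=...` lookup would reintroduce the function call):
```python
from datetime import date

from api.models import Finding

# Half-open range: sargable, so an index on inserted_at can be used
Finding.objects.filter(
    inserted_at__gte=date(2024, 1, 1),
    inserted_at__lt=date(2024, 1, 2),
)
```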
### 4. Wrong Index for Sorting
**Problem:** Query is sorted but index doesn't match sort order
```sql
-- If you have ORDER BY inserted_at DESC
-- You need an index with DESC or PostgreSQL will sort in memory
```
**Fix:** Create index with matching sort order
```python
class Meta:
    indexes = [
        models.Index(fields=['-inserted_at'], name='findings_inserted_desc_idx'),
    ]
```
### 5. Composite Index Column Order
**Problem:** Index exists but columns are in wrong order
```sql
-- Index on (tenant_id, scan_id)
-- This query WON'T use the index efficiently:
WHERE scan_id = '...'
-- This query WILL use the index:
WHERE tenant_id = '...' AND scan_id = '...'
```
**Rule:** The leftmost columns in a composite index must be in your WHERE clause.
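A leftmost prefix also works on its own, so a filter on `tenant_id` alone can still use the `(tenant_id, scan_id)` index:
```sql
-- Uses the (tenant_id, scan_id) index via its leftmost prefix
WHERE tenant_id = '...'
```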
## RLS (Row Level Security) Considerations
Prowler uses Row Level Security via PostgreSQL's `set_config`. When analyzing queries, remember:
1. RLS policies add implicit `WHERE tenant_id = current_tenant()` to queries
2. Always test with RLS enabled (how it runs in production)
3. Ensure `tenant_id` is the **first column** in composite indexes
### Using rls_transaction in Code
The `rls_transaction` context manager from `api.db_utils` sets the tenant context for all queries within its scope:
```python
from api.db_utils import rls_transaction
from api.models import Finding
with rls_transaction(tenant_id):
    # All queries here will have RLS applied
    qs = Finding.objects.filter(status="FAIL")
    list(qs)  # Execute
```
### Using RLS in Raw SQL (psql)
```sql
-- set_config(..., TRUE) is transaction-local: keep the context and the
-- query in one explicit transaction (psql autocommits otherwise)
BEGIN;
SELECT set_config('api.tenant_id', 'your-tenant-uuid', TRUE);
-- Now RLS policies will filter by this tenant
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL';
COMMIT;
```
### Index Design for RLS
Since every query includes `tenant_id` via RLS, your composite indexes should **always start with `tenant_id`**:
```python
class Meta:
    indexes = [
        # Good: tenant_id first
        models.Index(fields=['tenant_id', 'status', 'severity']),
        # Bad: tenant_id not first - RLS queries won't use this efficiently
        models.Index(fields=['status', 'tenant_id']),
    ]
```
## Test Data Requirements
The amount of test data you need depends on what you're testing. PostgreSQL's query planner considers table statistics, index definitions, and data distribution when choosing execution plans.
### Important Considerations
1. **Small datasets may not use indexes**: PostgreSQL may choose a sequential scan over an index scan if the table is small enough that scanning it directly is faster. This is expected behavior.
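To check whether an index *could* be used despite a small table, you can temporarily discourage sequential scans in your psql session (a diagnostic trick only; never set this in application code):
```sql
SET enable_seqscan = off;  -- make the planner strongly prefer indexes
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL';
RESET enable_seqscan;
```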
2. **Data must exist in the tables you're querying**: If your endpoint queries `findings`, `resources`, `scans`, or other tables, ensure those tables have data. Use the `findings` management command to generate test data:
```bash
cd api/src/backend
poetry run python manage.py findings \
--tenant <TENANT_ID> \
--findings 1000 \
--resources 500 \
--batch 500
```
3. **Update table statistics**: After inserting test data, run `ANALYZE` to update PostgreSQL's statistics:
```sql
ANALYZE findings;
ANALYZE resources;
ANALYZE scans;
-- Add other tables as needed
```
4. **Test with realistic data distribution**: If your query filters by a specific value (e.g., `status='FAIL'`), ensure your test data includes a realistic mix of values.
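A quick way to inspect the distribution of an existing column:
```sql
SELECT status, COUNT(*) AS n
FROM findings
GROUP BY status
ORDER BY n DESC;
```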
### When Index Usage Matters Most
Focus on validating index usage when:
- The table will have thousands or millions of rows in production
- The query is called frequently (list endpoints, dashboards)
- The query has multiple filters or joins
For small lookup tables or infrequently-called endpoints, sequential scans may be acceptable.
## Performance Checklist for PRs
Before submitting a PR that adds or modifies database queries:
- [ ] Profiled queries with Silk or `EXPLAIN ANALYZE`
- [ ] Verified indexes are being used (no unexpected `Seq Scan` on large tables)
- [ ] Checked `Rows Removed by Filter` is reasonable
- [ ] Tested with RLS enabled
- [ ] For critical endpoints: documented the query plan in the PR
## Useful Commands
### Update Table Statistics
```sql
ANALYZE findings;
ANALYZE resources;
```
### See Existing Indexes
```sql
SELECT indexname, indexdef
FROM pg_indexes
WHERE tablename = 'findings';
```
### See Index Usage Stats
```sql
SELECT
schemaname,
tablename,
indexname,
idx_scan,
idx_tup_read,
idx_tup_fetch
FROM pg_stat_user_indexes
WHERE tablename = 'findings'
ORDER BY idx_scan DESC;
```
### Check Table Size
```sql
SELECT
relname as table_name,
pg_size_pretty(pg_total_relation_size(relid)) as total_size
FROM pg_catalog.pg_statio_user_tables
WHERE relname IN ('findings', 'resources', 'scans')
ORDER BY pg_total_relation_size(relid) DESC;
```
## Working with Partitioned Tables
The `findings` and `resource_finding_mappings` tables are partitioned. When adding indexes, use the helper functions from `api.db_utils`:
### Adding Indexes to Partitions
```python
# In a migration file
from functools import partial
from django.db import migrations
from api.db_utils import create_index_on_partitions, drop_index_on_partitions
class Migration(migrations.Migration):
    atomic = False  # Required for CONCURRENTLY

    dependencies = [
        ("api", "XXXX_previous_migration"),
    ]

    operations = [
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="my_new_idx",
                columns="tenant_id, status, severity",
                all_partitions=False,  # Only current/future partitions
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="my_new_idx",
            ),
        ),
    ]
```
### Parameters
- `all_partitions=False` (default): Only creates indexes on current and future partitions. Use this for new indexes to avoid maintenance overhead on old data.
- `all_partitions=True`: Creates indexes on all partitions. Use when migrating critical existing indexes.
See [Partitions Documentation](./partitions.md) for more details on partitioning strategy.
## Further Reading
- [Django Silk Documentation](https://github.com/jazzband/django-silk)
- [PostgreSQL EXPLAIN Documentation](https://www.postgresql.org/docs/current/sql-explain.html)
- [Using EXPLAIN](https://www.postgresql.org/docs/current/using-explain.html)
- [Index Types in PostgreSQL](https://www.postgresql.org/docs/current/indexes-types.html)
- [Prowler Partitions Documentation](./partitions.md)
+63
View File
@@ -25,6 +25,7 @@ from api.db_utils import (
from api.models import (
AttackSurfaceOverview,
ComplianceRequirementOverview,
DailySeveritySummary,
Finding,
Integration,
Invitation,
@@ -795,6 +796,68 @@ class ScanSummaryFilter(FilterSet):
}
class DailySeveritySummaryFilter(FilterSet):
"""Filter for findings_severity/timeseries endpoint."""
MAX_DATE_RANGE_DAYS = 365
provider_id = UUIDFilter(field_name="provider_id", lookup_expr="exact")
provider_id__in = UUIDInFilter(field_name="provider_id", lookup_expr="in")
provider_type = ChoiceFilter(
field_name="provider__provider", choices=Provider.ProviderChoices.choices
)
provider_type__in = ChoiceInFilter(
field_name="provider__provider", choices=Provider.ProviderChoices.choices
)
date_from = DateFilter(method="filter_noop")
date_to = DateFilter(method="filter_noop")
class Meta:
model = DailySeveritySummary
fields = ["provider_id"]
def filter_noop(self, queryset, name, value):
return queryset
def filter_queryset(self, queryset):
if not self.data.get("date_from"):
raise ValidationError(
[
{
"detail": "This query parameter is required.",
"status": "400",
"source": {"pointer": "filter[date_from]"},
"code": "required",
}
]
)
today = date.today()
date_from = self.form.cleaned_data.get("date_from")
date_to = min(self.form.cleaned_data.get("date_to") or today, today)
if (date_to - date_from).days > self.MAX_DATE_RANGE_DAYS:
raise ValidationError(
[
{
"detail": f"Date range cannot exceed {self.MAX_DATE_RANGE_DAYS} days.",
"status": "400",
"source": {"pointer": "filter[date_from]"},
"code": "invalid",
}
]
)
# Stash the validated range on the request for the view to read
self.request._date_from = date_from
self.request._date_to = date_to
# Apply date filter (only lte for fill-forward logic)
queryset = queryset.filter(date__lte=date_to)
return super().filter_queryset(queryset)
class ScanSummarySeverityFilter(ScanSummaryFilter):
"""Filter for findings_severity ScanSummary endpoint - includes status filters"""
@@ -0,0 +1,96 @@
# Generated by Django 5.1.14 on 2025-12-03 13:38
import uuid
import django.db.models.deletion
from django.db import migrations, models
import api.rls
class Migration(migrations.Migration):
dependencies = [
("api", "0060_attack_surface_overview"),
]
operations = [
migrations.CreateModel(
name="DailySeveritySummary",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("date", models.DateField()),
("critical", models.IntegerField(default=0)),
("high", models.IntegerField(default=0)),
("medium", models.IntegerField(default=0)),
("low", models.IntegerField(default=0)),
("informational", models.IntegerField(default=0)),
("muted", models.IntegerField(default=0)),
(
"provider",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="daily_severity_summaries",
related_query_name="daily_severity_summary",
to="api.provider",
),
),
(
"scan",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="daily_severity_summaries",
related_query_name="daily_severity_summary",
to="api.scan",
),
),
(
"tenant",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="api.tenant",
),
),
],
options={
"db_table": "daily_severity_summaries",
"abstract": False,
},
),
migrations.AddIndex(
model_name="dailyseveritysummary",
index=models.Index(
fields=["tenant_id", "id"],
name="dss_tenant_id_idx",
),
),
migrations.AddIndex(
model_name="dailyseveritysummary",
index=models.Index(
fields=["tenant_id", "provider_id"],
name="dss_tenant_provider_idx",
),
),
migrations.AddConstraint(
model_name="dailyseveritysummary",
constraint=models.UniqueConstraint(
fields=("tenant_id", "provider", "date"),
name="unique_daily_severity_summary",
),
),
migrations.AddConstraint(
model_name="dailyseveritysummary",
constraint=api.rls.RowLevelSecurityConstraint(
"tenant_id",
name="rls_on_dailyseveritysummary",
statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
),
),
]
+59
View File
@@ -1500,6 +1500,65 @@ class ScanSummary(RowLevelSecurityProtectedModel):
resource_name = "scan-summaries"
class DailySeveritySummary(RowLevelSecurityProtectedModel):
"""
Pre-aggregated daily severity counts per provider.
Used by findings_severity/timeseries endpoint for efficient queries.
"""
objects = ActiveProviderManager()
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
date = models.DateField()
provider = models.ForeignKey(
Provider,
on_delete=models.CASCADE,
related_name="daily_severity_summaries",
related_query_name="daily_severity_summary",
)
scan = models.ForeignKey(
Scan,
on_delete=models.CASCADE,
related_name="daily_severity_summaries",
related_query_name="daily_severity_summary",
)
# Aggregated fail counts by severity
critical = models.IntegerField(default=0)
high = models.IntegerField(default=0)
medium = models.IntegerField(default=0)
low = models.IntegerField(default=0)
informational = models.IntegerField(default=0)
muted = models.IntegerField(default=0)
class Meta(RowLevelSecurityProtectedModel.Meta):
db_table = "daily_severity_summaries"
constraints = [
models.UniqueConstraint(
fields=("tenant_id", "provider", "date"),
name="unique_daily_severity_summary",
),
RowLevelSecurityConstraint(
field="tenant_id",
name="rls_on_%(class)s",
statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
),
]
indexes = [
models.Index(
fields=["tenant_id", "id"],
name="dss_tenant_id_idx",
),
models.Index(
fields=["tenant_id", "provider_id"],
name="dss_tenant_provider_idx",
),
]
class Integration(RowLevelSecurityProtectedModel):
class IntegrationChoices(models.TextChoices):
AMAZON_S3 = "amazon_s3", _("Amazon S3")
+202
View File
@@ -4940,6 +4940,154 @@ paths:
schema:
$ref: '#/components/schemas/OverviewSeverityResponse'
description: ''
/api/v1/overviews/findings_severity/timeseries:
get:
operationId: overviews_findings_severity_timeseries_retrieve
description: Retrieve daily aggregated findings data grouped by severity levels
over a date range. Returns one data point per day with counts of failed findings
by severity (critical, high, medium, low, informational) and muted findings.
Days without scans are filled forward with the most recent known values. Use
date_from (required) and date_to filters to specify the range.
summary: Get findings severity data over time
parameters:
- in: query
name: fields[findings-severity-timeseries]
schema:
type: array
items:
type: string
enum:
- id
- critical
- high
- medium
- low
- informational
- muted
- scan_ids
description: Return only specific fields in the response on a per-type
basis by including a fields[TYPE] query parameter.
explode: false
- in: query
name: filter[date_from]
schema:
type: string
- in: query
name: filter[date_to]
schema:
type: string
- in: query
name: filter[provider_id]
schema:
type: string
format: uuid
- in: query
name: filter[provider_id__in]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[provider_type]
schema:
type: string
enum:
- aws
- azure
- gcp
- github
- iac
- kubernetes
- m365
- mongodbatlas
- oraclecloud
description: |-
* `aws` - AWS
* `azure` - Azure
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
- in: query
name: filter[provider_type__in]
schema:
type: array
items:
type: string
enum:
- aws
- azure
- gcp
- github
- iac
- kubernetes
- m365
- mongodbatlas
- oraclecloud
description: |-
Multiple values may be separated by commas.
* `aws` - AWS
* `azure` - Azure
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
explode: false
style: form
- name: filter[search]
required: false
in: query
description: A search term.
schema:
type: string
- name: sort
required: false
in: query
description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
schema:
type: array
items:
type: string
enum:
- id
- -id
- critical
- -critical
- high
- -high
- medium
- -medium
- low
- -low
- informational
- -informational
- muted
- -muted
- scan_ids
- -scan_ids
explode: false
tags:
- Overview
security:
- JWT or API Key: []
responses:
'200':
content:
application/vnd.api+json:
schema:
$ref: '#/components/schemas/FindingsSeverityOverTimeResponse'
description: ''
/api/v1/overviews/providers:
get:
operationId: overviews_providers_retrieve
@@ -11099,6 +11247,60 @@ components:
$ref: '#/components/schemas/Finding'
required:
- data
FindingsSeverityOverTime:
type: object
required:
- type
- id
additionalProperties: false
properties:
type:
type: string
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
member is used to describe resource objects that share common attributes
and relationships.
enum:
- findings-severity-over-time
id: {}
attributes:
type: object
properties:
id:
type: string
format: date
critical:
type: integer
high:
type: integer
medium:
type: integer
low:
type: integer
informational:
type: integer
muted:
type: integer
scan_ids:
type: array
items:
type: string
format: uuid
required:
- id
- critical
- high
- medium
- low
- informational
- muted
- scan_ids
FindingsSeverityOverTimeResponse:
type: object
properties:
data:
$ref: '#/components/schemas/FindingsSeverityOverTime'
required:
- data
Integration:
type: object
required:
+266 -1
View File
@@ -3,7 +3,7 @@ import io
import json
import os
import tempfile
from datetime import datetime, timedelta, timezone
from datetime import date, datetime, timedelta, timezone
from decimal import Decimal
from pathlib import Path
from types import SimpleNamespace
@@ -38,6 +38,7 @@ from api.models import (
AttackSurfaceOverview,
ComplianceOverviewSummary,
ComplianceRequirementOverview,
DailySeveritySummary,
Finding,
Integration,
Invitation,
@@ -6984,6 +6985,270 @@ class TestOverviewViewSet:
assert combined_attributes["medium"] == 4
assert combined_attributes["critical"] == 3
def test_overview_findings_severity_timeseries_requires_date_from(
self, authenticated_client
):
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries")
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "date_from" in response.json()["errors"][0]["source"]["pointer"]
def test_overview_findings_severity_timeseries_invalid_date_format(
self, authenticated_client
):
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{"filter[date_from]": "invalid-date"},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "Enter a valid date." in response.json()["errors"][0]["detail"]
def test_overview_findings_severity_timeseries_empty_data(
self, authenticated_client
):
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{
"filter[date_from]": "2024-01-01",
"filter[date_to]": "2024-01-03",
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
# Should return 3 days with fill-forward (all zeros since no data)
assert len(data) == 3
for item in data:
assert item["attributes"]["critical"] == 0
assert item["attributes"]["high"] == 0
assert item["attributes"]["medium"] == 0
assert item["attributes"]["low"] == 0
assert item["attributes"]["informational"] == 0
assert item["attributes"]["muted"] == 0
assert item["attributes"]["scan_ids"] == []
def test_overview_findings_severity_timeseries_with_data(
self, authenticated_client, tenants_fixture, providers_fixture
):
tenant = tenants_fixture[0]
provider1, provider2, *_ = providers_fixture
# Create scan for day 1
scan1 = Scan.objects.create(
name="severity-over-time-scan-1",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
)
# Create scan for day 3
scan3 = Scan.objects.create(
name="severity-over-time-scan-3",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 1, 3, 12, 0, 0, tzinfo=timezone.utc),
)
# Create DailySeveritySummary for day 1
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider1,
scan=scan1,
date=date(2024, 1, 1),
critical=10,
high=20,
medium=30,
low=40,
informational=50,
muted=5,
)
# Create DailySeveritySummary for day 3
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider1,
scan=scan3,
date=date(2024, 1, 3),
critical=15,
high=25,
medium=35,
low=45,
informational=55,
muted=10,
)
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{
"filter[date_from]": "2024-01-01",
"filter[date_to]": "2024-01-03",
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert len(data) == 3
# Day 1 - actual data (id is the date)
assert data[0]["id"] == "2024-01-01"
assert data[0]["attributes"]["critical"] == 10
assert data[0]["attributes"]["high"] == 20
assert data[0]["attributes"]["scan_ids"] == [str(scan1.id)]
# Day 2 - fill forward from day 1 (no data for this day)
assert data[1]["id"] == "2024-01-02"
assert data[1]["attributes"]["critical"] == 10
assert data[1]["attributes"]["high"] == 20
assert data[1]["attributes"]["scan_ids"] == [str(scan1.id)]
# Day 3 - actual data
assert data[2]["id"] == "2024-01-03"
assert data[2]["attributes"]["critical"] == 15
assert data[2]["attributes"]["high"] == 25
assert data[2]["attributes"]["scan_ids"] == [str(scan3.id)]
def test_overview_findings_severity_timeseries_aggregates_providers(
self, authenticated_client, tenants_fixture, providers_fixture
):
tenant = tenants_fixture[0]
provider1, provider2, *_ = providers_fixture
# Same day, different providers
scan1 = Scan.objects.create(
name="severity-over-time-scan-p1",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 2, 1, 12, 0, 0, tzinfo=timezone.utc),
)
scan2 = Scan.objects.create(
name="severity-over-time-scan-p2",
provider=provider2,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 2, 1, 14, 0, 0, tzinfo=timezone.utc),
)
# Create DailySeveritySummary for provider1
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider1,
scan=scan1,
date=date(2024, 2, 1),
critical=10,
high=20,
medium=30,
low=40,
informational=50,
muted=5,
)
# Create DailySeveritySummary for provider2
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider2,
scan=scan2,
date=date(2024, 2, 1),
critical=5,
high=10,
medium=15,
low=20,
informational=25,
muted=3,
)
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{
"filter[date_from]": "2024-02-01",
"filter[date_to]": "2024-02-01",
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert len(data) == 1
# Should aggregate both providers
assert data[0]["attributes"]["critical"] == 15 # 10 + 5
assert data[0]["attributes"]["high"] == 30 # 20 + 10
assert data[0]["attributes"]["medium"] == 45 # 30 + 15
assert data[0]["attributes"]["low"] == 60 # 40 + 20
assert data[0]["attributes"]["informational"] == 75 # 50 + 25
assert data[0]["attributes"]["muted"] == 8 # 5 + 3
# scan_ids should contain both scans (order may vary)
assert set(data[0]["attributes"]["scan_ids"]) == {str(scan1.id), str(scan2.id)}
def test_overview_findings_severity_timeseries_provider_filter(
self, authenticated_client, tenants_fixture, providers_fixture
):
tenant = tenants_fixture[0]
provider1, provider2, *_ = providers_fixture
scan1 = Scan.objects.create(
name="severity-over-time-filter-scan-p1",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 3, 1, 12, 0, 0, tzinfo=timezone.utc),
)
scan2 = Scan.objects.create(
name="severity-over-time-filter-scan-p2",
provider=provider2,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 3, 1, 14, 0, 0, tzinfo=timezone.utc),
)
# Provider 1 - critical=100
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider1,
scan=scan1,
date=date(2024, 3, 1),
critical=100,
high=0,
medium=0,
low=0,
informational=0,
muted=0,
)
# Provider 2 - critical=50
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider2,
scan=scan2,
date=date(2024, 3, 1),
critical=50,
high=0,
medium=0,
low=0,
informational=0,
muted=0,
)
# Filter by provider1 only
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{
"filter[date_from]": "2024-03-01",
"filter[date_to]": "2024-03-01",
"filter[provider_id]": str(provider1.id),
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert len(data) == 1
assert data[0]["attributes"]["critical"] == 100 # Only provider1
assert data[0]["attributes"]["scan_ids"] == [str(scan1.id)]
def test_overview_attack_surface_no_data(self, authenticated_client):
response = authenticated_client.get(reverse("overview-attack-surface"))
assert response.status_code == status.HTTP_200_OK
+16
@@ -2204,6 +2204,22 @@ class OverviewSeveritySerializer(BaseSerializerV1):
resource_name = "findings-severity-overview"
class FindingsSeverityOverTimeSerializer(BaseSerializerV1):
"""Serializer for daily findings severity trend data."""
id = serializers.DateField(source="date")
critical = serializers.IntegerField()
high = serializers.IntegerField()
medium = serializers.IntegerField()
low = serializers.IntegerField()
informational = serializers.IntegerField()
muted = serializers.IntegerField()
scan_ids = serializers.ListField(child=serializers.UUIDField())
class JSONAPIMeta:
resource_name = "findings-severity-over-time"
class OverviewServiceSerializer(BaseSerializerV1):
id = serializers.CharField(source="service")
total = serializers.IntegerField()
+137 -1
@@ -102,6 +102,7 @@ from api.filters import (
AttackSurfaceOverviewFilter,
ComplianceOverviewFilter,
CustomDjangoFilterBackend,
DailySeveritySummaryFilter,
FindingFilter,
IntegrationFilter,
IntegrationJiraFindingsFilter,
@@ -131,6 +132,7 @@ from api.models import (
AttackSurfaceOverview,
ComplianceOverviewSummary,
ComplianceRequirementOverview,
DailySeveritySummary,
Finding,
Integration,
Invitation,
@@ -184,6 +186,7 @@ from api.v1.serializers import (
FindingDynamicFilterSerializer,
FindingMetadataSerializer,
FindingSerializer,
FindingsSeverityOverTimeSerializer,
IntegrationCreateSerializer,
IntegrationJiraDispatchSerializer,
IntegrationSerializer,
@@ -4009,6 +4012,16 @@ class ComplianceOverviewViewSet(BaseRLSViewSet, TaskManagementMixin):
),
filters=True,
),
findings_severity_timeseries=extend_schema(
summary="Get findings severity data over time",
description=(
"Retrieve daily aggregated findings data grouped by severity levels over a date range. "
"Returns one data point per day with counts of failed findings by severity (critical, high, "
"medium, low, informational) and muted findings. Days without scans are filled forward with "
"the most recent known values. Use date_from (required) and date_to filters to specify the range."
),
filters=True,
),
attack_surface=extend_schema(
summary="Get attack surface overview",
description="Retrieve aggregated attack surface metrics from latest completed scans per provider.",
@@ -4057,7 +4070,16 @@ class OverviewViewSet(BaseRLSViewSet):
if not role.unlimited_visibility:
self.allowed_providers = providers
return ScanSummary.all_objects.filter(tenant_id=self.request.tenant_id)
tenant_id = self.request.tenant_id
# Return appropriate queryset per action
if self.action == "findings_severity_timeseries":
qs = DailySeveritySummary.objects.filter(tenant_id=tenant_id)
if hasattr(self, "allowed_providers"):
qs = qs.filter(provider_id__in=self.allowed_providers)
return qs
return ScanSummary.all_objects.filter(tenant_id=tenant_id)
def get_serializer_class(self):
if self.action == "providers":
@@ -4068,6 +4090,8 @@ class OverviewViewSet(BaseRLSViewSet):
return OverviewFindingSerializer
elif self.action == "findings_severity":
return OverviewSeveritySerializer
elif self.action == "findings_severity_timeseries":
return FindingsSeverityOverTimeSerializer
elif self.action == "services":
return OverviewServiceSerializer
elif self.action == "regions":
@@ -4085,8 +4109,18 @@ class OverviewViewSet(BaseRLSViewSet):
return ScanSummaryFilter
elif self.action == "findings_severity":
return ScanSummarySeverityFilter
elif self.action == "findings_severity_timeseries":
return DailySeveritySummaryFilter
return None
def filter_queryset(self, queryset):
# Skip OrderingFilter for findings_severity_timeseries (no inserted_at field)
if self.action == "findings_severity_timeseries":
return CustomDjangoFilterBackend().filter_queryset(
self.request, queryset, self
)
return super().filter_queryset(queryset)
@extend_schema(exclude=True)
def list(self, request, *args, **kwargs):
raise MethodNotAllowed(method="GET")
@@ -4363,6 +4397,108 @@ class OverviewViewSet(BaseRLSViewSet):
return Response(serializer.data, status=status.HTTP_200_OK)
@action(
detail=False,
methods=["get"],
url_path="findings_severity/timeseries",
url_name="findings_severity_timeseries",
)
def findings_severity_timeseries(self, request):
"""
Daily severity trends for charts. Uses DailySeveritySummary pre-aggregation.
Requires date_from filter.
"""
# Get queryset with RBAC, provider, and date filters applied
# Date validation is handled by DailySeveritySummaryFilter
daily_qs = self.filter_queryset(self.get_queryset())
date_from = request._date_from
date_to = request._date_to
if not daily_qs.exists():
# No data matches filters - return zeros
result = self._generate_zero_result(date_from, date_to)
serializer = self.get_serializer(result, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
# Fetch all data for fill-forward logic
daily_summaries = list(
daily_qs.order_by("provider_id", "-date").values(
"provider_id",
"scan_id",
"date",
"critical",
"high",
"medium",
"low",
"informational",
"muted",
)
)
if not daily_summaries:
result = self._generate_zero_result(date_from, date_to)
serializer = self.get_serializer(result, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
# Build provider_data: {provider_id: [(date, data), ...]} sorted by date desc
provider_data = defaultdict(list)
for summary in daily_summaries:
provider_data[summary["provider_id"]].append(summary)
# For each day, find the latest data per provider and sum values
result = []
current_date = date_from
while current_date <= date_to:
day_totals = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
"informational": 0,
"muted": 0,
}
day_scan_ids = []
for provider_id, summaries in provider_data.items():
# Find the latest data for this provider <= current_date
for summary in summaries: # Already sorted by date desc
if summary["date"] <= current_date:
day_totals["critical"] += summary["critical"] or 0
day_totals["high"] += summary["high"] or 0
day_totals["medium"] += summary["medium"] or 0
day_totals["low"] += summary["low"] or 0
day_totals["informational"] += summary["informational"] or 0
day_totals["muted"] += summary["muted"] or 0
day_scan_ids.append(summary["scan_id"])
break # Found the latest data for this provider
result.append(
{"date": current_date, "scan_ids": day_scan_ids, **day_totals}
)
current_date += timedelta(days=1)
serializer = self.get_serializer(result, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
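A hedged usage sketch of the action above, assuming it is routed under `/api/v1/overviews` like the other overview endpoints in the spec; the host and token are placeholders:

```python
import requests

BASE_URL = "https://prowler-api.example.com"  # placeholder host
HEADERS = {"Authorization": "Bearer <JWT>"}  # placeholder credentials

response = requests.get(
    f"{BASE_URL}/api/v1/overviews/findings_severity/timeseries",
    headers=HEADERS,
    params={
        "filter[date_from]": "2024-01-01",  # required by DailySeveritySummaryFilter
        "filter[date_to]": "2024-01-03",
    },
)
response.raise_for_status()

for item in response.json()["data"]:
    attrs = item["attributes"]
    # Every date in the range appears exactly once; days without a summary
    # carry forward the latest known values per provider (fill-forward).
    print(item["id"], attrs["critical"], attrs["scan_ids"])
```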
def _generate_zero_result(self, date_from, date_to):
"""Generate a list of zero-filled results for each date in range."""
result = []
current_date = date_from
zero_values = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
"informational": 0,
"muted": 0,
"scan_ids": [],
}
while current_date <= date_to:
result.append({"date": current_date, **zero_values})
current_date += timedelta(days=1)
return result
@extend_schema(
summary="Get ThreatScore snapshots",
description=(
+101
@@ -1,14 +1,18 @@
from collections import defaultdict
from django.db.models import Sum
from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import (
ComplianceOverviewSummary,
ComplianceRequirementOverview,
DailySeveritySummary,
Resource,
ResourceFindingMapping,
ResourceScanSummary,
Scan,
ScanSummary,
StateChoices,
)
@@ -175,3 +179,100 @@ def backfill_compliance_summaries(tenant_id: str, scan_id: str):
)
return {"status": "backfilled", "inserted": len(summary_objects)}
def backfill_daily_severity_summaries(tenant_id: str, days: int = None):
"""
Backfill DailySeveritySummary from completed scans.
Groups by provider+date, keeps latest scan per day.
"""
from datetime import timedelta
from django.utils import timezone
created_count = 0
updated_count = 0
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
scan_filter = {
"tenant_id": tenant_id,
"state": StateChoices.COMPLETED,
"completed_at__isnull": False,
}
if days is not None:
cutoff_date = timezone.now() - timedelta(days=days)
scan_filter["completed_at__gte"] = cutoff_date
completed_scans = (
Scan.objects.filter(**scan_filter)
.order_by("provider_id", "-completed_at")
.values("id", "provider_id", "completed_at")
)
if not completed_scans:
return {"status": "no scans to backfill"}
# Keep only latest scan per provider/day
latest_scans_by_day = {}
for scan in completed_scans:
key = (scan["provider_id"], scan["completed_at"].date())
if key not in latest_scans_by_day:
latest_scans_by_day[key] = scan
# Process each provider/day
for (provider_id, scan_date), scan in latest_scans_by_day.items():
scan_id = scan["id"]
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
severity_totals = (
ScanSummary.objects.filter(
tenant_id=tenant_id,
scan_id=scan_id,
)
.values("severity")
.annotate(total_fail=Sum("fail"), total_muted=Sum("muted"))
)
severity_data = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
"informational": 0,
"muted": 0,
}
for row in severity_totals:
severity = row["severity"]
if severity in severity_data:
severity_data[severity] = row["total_fail"] or 0
severity_data["muted"] += row["total_muted"] or 0
with rls_transaction(tenant_id):
_, created = DailySeveritySummary.objects.update_or_create(
tenant_id=tenant_id,
provider_id=provider_id,
date=scan_date,
defaults={
"scan_id": scan_id,
"critical": severity_data["critical"],
"high": severity_data["high"],
"medium": severity_data["medium"],
"low": severity_data["low"],
"informational": severity_data["informational"],
"muted": severity_data["muted"],
},
)
if created:
created_count += 1
else:
updated_count += 1
return {
"status": "backfilled",
"created": created_count,
"updated": updated_count,
"total_days": len(latest_scans_by_day),
}
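A hedged sketch of dispatching this backfill through its Celery task (registered later in this diff as `backfill-daily-severity-summaries` on the `backfill` queue); the import path and tenant UUID are assumptions:

```python
# Sketch only: module path and tenant id are placeholders.
from tasks.tasks import backfill_daily_severity_summaries_task

# Limit the backfill to the last 90 days of completed scans;
# omit `days` to walk the tenant's full scan history.
backfill_daily_severity_summaries_task.delay(
    tenant_id="11111111-1111-1111-1111-111111111111",
    days=90,
)
```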
+70
@@ -29,6 +29,7 @@ from api.models import (
AttackSurfaceOverview,
ComplianceOverviewSummary,
ComplianceRequirementOverview,
DailySeveritySummary,
Finding,
MuteRule,
Processor,
@@ -1348,3 +1349,72 @@ def aggregate_attack_surface(tenant_id: str, scan_id: str):
)
else:
logger.info(f"No attack surface overview records created for scan {scan_id}")
def aggregate_daily_severity(tenant_id: str, scan_id: str):
"""Aggregate scan severity counts into DailySeveritySummary (one record per provider/day)."""
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
scan = Scan.objects.filter(
tenant_id=tenant_id,
id=scan_id,
state=StateChoices.COMPLETED,
).first()
if not scan:
logger.warning(f"Scan {scan_id} not found or not completed")
return {"status": "scan is not completed"}
provider_id = scan.provider_id
scan_date = scan.completed_at.date()
severity_totals = (
ScanSummary.objects.filter(
tenant_id=tenant_id,
scan_id=scan_id,
)
.values("severity")
.annotate(total_fail=Sum("fail"), total_muted=Sum("muted"))
)
severity_data = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
"informational": 0,
"muted": 0,
}
for row in severity_totals:
severity = row["severity"]
if severity in severity_data:
severity_data[severity] = row["total_fail"] or 0
severity_data["muted"] += row["total_muted"] or 0
with rls_transaction(tenant_id):
summary, created = DailySeveritySummary.objects.update_or_create(
tenant_id=tenant_id,
provider_id=provider_id,
date=scan_date,
defaults={
"scan_id": scan_id,
"critical": severity_data["critical"],
"high": severity_data["high"],
"medium": severity_data["medium"],
"low": severity_data["low"],
"informational": severity_data["informational"],
"muted": severity_data["muted"],
},
)
action = "created" if created else "updated"
logger.info(
f"Daily severity summary {action} for provider {provider_id} on {scan_date}"
)
return {
"status": action,
"provider_id": str(provider_id),
"date": str(scan_date),
"severity_data": severity_data,
}
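A brief sketch of the one-row-per-provider/day invariant this function maintains: whichever aggregation runs last for a given provider and date wins via `update_or_create`. All identifiers below are placeholders:

```python
from datetime import date

from api.models import DailySeveritySummary
from tasks.jobs.scan import aggregate_daily_severity

tenant_id = "11111111-1111-1111-1111-111111111111"  # placeholder
provider_id = "22222222-2222-2222-2222-222222222222"  # placeholder
morning_scan_id = "33333333-3333-3333-3333-333333333333"  # placeholder
evening_scan_id = "44444444-4444-4444-4444-444444444444"  # placeholder

# Two scans of the same provider complete on the same day; each aggregation
# upserts the same (tenant, provider, date) row, so one record remains.
aggregate_daily_severity(tenant_id=tenant_id, scan_id=morning_scan_id)
aggregate_daily_severity(tenant_id=tenant_id, scan_id=evening_scan_id)

row = DailySeveritySummary.objects.get(
    tenant_id=tenant_id,
    provider_id=provider_id,
    date=date(2024, 1, 1),  # both scans completed on this day
)
assert str(row.scan_id) == evening_scan_id  # the later run overwrote the row
```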
+20 -2
@@ -10,6 +10,7 @@ from config.django.base import DJANGO_FINDINGS_BATCH_SIZE, DJANGO_TMP_OUTPUT_DIR
from django_celery_beat.models import PeriodicTask
from tasks.jobs.backfill import (
backfill_compliance_summaries,
backfill_daily_severity_summaries,
backfill_resource_scan_summaries,
)
from tasks.jobs.connection import (
@@ -38,6 +39,7 @@ from tasks.jobs.muting import mute_historical_findings
from tasks.jobs.report import generate_compliance_reports_job
from tasks.jobs.scan import (
aggregate_attack_surface,
aggregate_daily_severity,
aggregate_findings,
create_compliance_requirements,
perform_prowler_scan,
@@ -75,8 +77,11 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
)
chain(
perform_scan_summary_task.si(tenant_id=tenant_id, scan_id=scan_id),
generate_outputs_task.si(
scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
group(
aggregate_daily_severity_task.si(tenant_id=tenant_id, scan_id=scan_id),
generate_outputs_task.si(
scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
),
),
group(
# Use optimized task that generates both reports with shared queries
@@ -523,6 +528,12 @@ def backfill_compliance_summaries_task(tenant_id: str, scan_id: str):
return backfill_compliance_summaries(tenant_id=tenant_id, scan_id=scan_id)
@shared_task(name="backfill-daily-severity-summaries", queue="backfill")
def backfill_daily_severity_summaries_task(tenant_id: str, days: int = None):
"""Backfill DailySeveritySummary from historical scans. Use days param to limit scope."""
return backfill_daily_severity_summaries(tenant_id=tenant_id, days=days)
@shared_task(base=RLSTask, name="scan-compliance-overviews", queue="compliance")
@handle_provider_deletion
def create_compliance_requirements_task(tenant_id: str, scan_id: str):
@@ -556,6 +567,13 @@ def aggregate_attack_surface_task(tenant_id: str, scan_id: str):
return aggregate_attack_surface(tenant_id=tenant_id, scan_id=scan_id)
@shared_task(name="scan-daily-severity", queue="overview")
@handle_provider_deletion
def aggregate_daily_severity_task(tenant_id: str, scan_id: str):
"""Aggregate scan severity into DailySeveritySummary for findings_severity/timeseries endpoint."""
return aggregate_daily_severity(tenant_id=tenant_id, scan_id=scan_id)
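A toy sketch of the canvas shape wired into `_perform_scan_complete_tasks` above: `.si()` builds immutable signatures, so no task receives its predecessor's return value, and the grouped tasks start concurrently once the first step finishes. The task bodies are stand-ins and assume a configured Celery app, not the real implementations:

```python
from celery import chain, group, shared_task


@shared_task
def summarize(tenant_id, scan_id):
    return "summary done"


@shared_task
def aggregate_severity(tenant_id, scan_id):
    return "daily severity done"


@shared_task
def outputs(tenant_id, scan_id):
    return "outputs done"


# Step 1 runs alone; steps 2a and 2b run in parallel after it completes.
chain(
    summarize.si("tenant-1", "scan-1"),
    group(
        aggregate_severity.si("tenant-1", "scan-1"),
        outputs.si("tenant-1", "scan-1"),
    ),
).apply_async()
```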
@shared_task(base=RLSTask, name="lighthouse-connection-check")
@set_tenant
def check_lighthouse_connection_task(lighthouse_config_id: str, tenant_id: str = None):
+1
@@ -13,6 +13,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `compute_instance_deletion_protection_enabled` check for GCP provider [(#9358)](https://github.com/prowler-cloud/prowler/pull/9358)
### Changed
- Update AWS Macie service metadata to new format [(#9265)](https://github.com/prowler-cloud/prowler/pull/9265)
- Update AWS Lightsail service metadata to new format [(#9264)](https://github.com/prowler-cloud/prowler/pull/9264)
- Update AWS GuardDuty service metadata to new format [(#9259)](https://github.com/prowler-cloud/prowler/pull/9259)
- Update AWS Network Firewall service metadata to new format [(#9382)](https://github.com/prowler-cloud/prowler/pull/9382)
@@ -1,31 +1,39 @@
{
"Provider": "aws",
"CheckID": "macie_automated_sensitive_data_discovery_enabled",
"CheckTitle": "Check if Macie automated sensitive data discovery is enabled.",
"CheckTitle": "Macie automated sensitive data discovery is enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"ServiceName": "macie",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "AwsAccount",
"Description": "Check if automated sensitive data discovery is enabled for an Amazon Macie account. The control fails if it isn't enabled.",
"Risk": "Without automated sensitive data discovery, there could be delays in identifying sensitive data, leading to data exposure risks in Amazon S3 buckets.",
"RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/macie-auto-sensitive-data-discovery-check.html",
"ResourceType": "Other",
"Description": "**Amazon Macie** administrator account has **automated sensitive data discovery** enabled for S3 data. The evaluation confirms the feature's status for the account in each Region.",
"Risk": "Without continuous discovery, sensitive S3 objects remain unclassified and unnoticed, weakening **confidentiality**. Over-permissive or public access can persist undetected, enabling **data exfiltration** and delaying containment and **forensic** response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/config/latest/developerguide/macie-auto-sensitive-data-discovery-check.html",
"https://docs.aws.amazon.com/securityhub/latest/userguide/macie-controls.html#macie-2",
"https://docs.aws.amazon.com/macie/latest/user/discovery-asdd-account-enable.html"
],
"Remediation": {
"Code": {
"CLI": "aws macie2 update-automated-discovery-configuration --status ENABLED",
"CLI": "aws macie2 update-automated-discovery-configuration --status ENABLED --region <REGION>",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/macie-controls.html#macie-2",
"Other": "1. In the AWS Console, open Amazon Macie\n2. Select the correct Region from the Region selector\n3. Go to Settings > Automated sensitive data discovery\n4. Click Enable under Status (choose My account if prompted)\n5. Repeat in other Regions where Macie is enabled if needed",
"Terraform": ""
},
"Recommendation": {
"Text": "To enable and configure automated sensitive data discovery jobs for S3 buckets, refer to the Configuring automated sensitive data discovery tutorial.",
"Url": "https://docs.aws.amazon.com/macie/latest/user/discovery-asdd-account-enable.html"
"Text": "Enable and maintain `automated sensitive data discovery` for the Macie administrator across required Regions. Include relevant buckets, tune identifiers and allow lists to reduce noise, and route findings to monitoring. Complement with **least privilege** on S3 and **defense in depth** for data protection.",
"Url": "https://hub.prowler.com/check/macie_automated_sensitive_data_discovery_enabled"
}
},
"Categories": [],
"Categories": [
"secrets"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,31 +1,36 @@
{
"Provider": "aws",
"CheckID": "macie_is_enabled",
"CheckTitle": "Check if Amazon Macie is enabled.",
"CheckTitle": "Amazon Macie is enabled",
"CheckType": [
"Data Protection"
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/AWS Security Best Practices"
],
"ServiceName": "macie",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id",
"Severity": "low",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Other",
"Description": "Check if Amazon Macie is enabled.",
"Risk": "Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to help you discover, monitor and protect your sensitive data in AWS.",
"Description": "**Amazon Macie** status is assessed per region with **S3** presence to determine if sensitive data discovery is operational. The outcome reflects whether Macie is active or in a `PAUSED`/not enabled state for the account and region.",
"Risk": "Without active Macie, sensitive data in **S3** can remain unclassified and exposed. Misconfigured access and public buckets may go undetected, enabling data exfiltration and secret leakage. This degrades confidentiality and widens breach blast radius by reducing visibility into where sensitive data resides.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://aws.amazon.com/macie/getting-started/"
],
"Remediation": {
"Code": {
"CLI": "aws macie2 enable-macie",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"CLI": "aws macie2 enable-macie --region <REGION>",
"NativeIaC": "```yaml\n# CloudFormation: Enable Amazon Macie in this region\nResources:\n MacieSession:\n Type: AWS::Macie::Session\n Properties:\n Status: ENABLED # Critical: Enables Macie for the account in this region\n```",
"Other": "1. Sign in to the AWS Management Console and switch to the target region\n2. Open Amazon Macie\n3. Click Get started or Enable Macie\n4. If Macie shows Suspended/Paused, click Resume Macie\n5. Repeat in each region with S3 buckets as needed",
"Terraform": "```hcl\n# Enables Amazon Macie in this region\nresource \"aws_macie2_account\" \"main\" {\n # Critical: Creating this resource enables Macie for the account in the region\n}\n```"
},
"Recommendation": {
"Text": "Enable Amazon Macie and create appropriate jobs to discover sensitive data.",
"Url": "https://aws.amazon.com/macie/getting-started/"
"Text": "Enable and maintain **Amazon Macie** in all regions hosting **S3** data. Use continuous sensitive data discovery, apply custom classifications for your data types, and route findings to monitoring. Enforce least privilege for Macie access and strengthen defense in depth with restrictive bucket policies and access controls.",
"Url": "https://hub.prowler.com/check/macie_is_enabled"
}
},
"Categories": [
"secrets",
"forensics-ready"
],
"DependsOn": [],
+2
@@ -6,6 +6,8 @@ All notable changes to the **Prowler UI** are documented in this file.
### 🚀 Added
- Risk Plot component with interactive legend and severity navigation to Overview page [(#9469)](https://github.com/prowler-cloud/prowler/pull/9469)
- Navigation progress bar for page transitions using Next.js `onRouterTransitionStart` [(#9465)](https://github.com/prowler-cloud/prowler/pull/9465)
- Finding Severity Over Time chart component to Overview page [(#9405)](https://github.com/prowler-cloud/prowler/pull/9405)
- Attack Surface component to Overview page [(#9412)](https://github.com/prowler-cloud/prowler/pull/9412)
+1
@@ -3,6 +3,7 @@ export * from "./attack-surface";
export * from "./findings";
export * from "./providers";
export * from "./regions";
export * from "./risk-plot";
export * from "./services";
export * from "./severity-trends";
export * from "./threat-score";
+4
@@ -0,0 +1,4 @@
// Risk Plot Actions
export * from "./risk-plot";
export * from "./risk-plot.adapter";
export * from "./types/risk-plot.types";
@@ -0,0 +1,94 @@
import { getProviderDisplayName } from "@/types/providers";
import type {
ProviderRiskData,
RiskPlotDataResponse,
RiskPlotPoint,
} from "./types/risk-plot.types";
/**
* Calculates percentage with proper rounding.
*/
function calculatePercentage(value: number, total: number): number {
if (total === 0) return 0;
return Math.round((value / total) * 100);
}
/**
* Adapts raw provider risk data to the format expected by RiskPlotClient.
*
* @param providersRiskData - Array of risk data per provider from API
* @returns Formatted data for the Risk Plot scatter chart
*/
export function adaptToRiskPlotData(
providersRiskData: ProviderRiskData[],
): RiskPlotDataResponse {
const points: RiskPlotPoint[] = [];
const providersWithoutData: RiskPlotDataResponse["providersWithoutData"] = [];
for (const providerData of providersRiskData) {
// Skip providers without ThreatScore data (no completed scans)
if (providerData.overallScore === null) {
providersWithoutData.push({
id: providerData.providerId,
name: providerData.providerName,
type: providerData.providerType,
});
continue;
}
// Convert provider type to display name (aws -> AWS, gcp -> Google, etc.)
const providerDisplayName = getProviderDisplayName(
providerData.providerType,
);
// Build severity data for the horizontal bar chart with percentages
let severityData;
let totalFailedFindings = 0;
if (providerData.severity) {
const { critical, high, medium, low, informational } =
providerData.severity;
totalFailedFindings = critical + high + medium + low + informational;
severityData = [
{
name: "Critical",
value: critical,
percentage: calculatePercentage(critical, totalFailedFindings),
},
{
name: "High",
value: high,
percentage: calculatePercentage(high, totalFailedFindings),
},
{
name: "Medium",
value: medium,
percentage: calculatePercentage(medium, totalFailedFindings),
},
{
name: "Low",
value: low,
percentage: calculatePercentage(low, totalFailedFindings),
},
{
name: "Info",
value: informational,
percentage: calculatePercentage(informational, totalFailedFindings),
},
];
}
points.push({
x: providerData.overallScore ?? 0,
y: totalFailedFindings,
provider: providerDisplayName,
name: providerData.providerName,
providerId: providerData.providerId,
severityData,
});
}
return { points, providersWithoutData };
}
@@ -0,0 +1,69 @@
"use server";
import { getFindingsBySeverity } from "@/actions/overview/findings";
import { getThreatScore } from "@/actions/overview/threat-score";
import { ProviderProps } from "@/types/providers";
import { ProviderRiskData } from "./types/risk-plot.types";
/**
* Fetches risk data for a single provider.
* Combines ThreatScore and Severity data in parallel.
*/
export async function getProviderRiskData(
provider: ProviderProps,
): Promise<ProviderRiskData> {
const providerId = provider.id;
const providerType = provider.attributes.provider;
const providerName = provider.attributes.alias || provider.attributes.uid;
// Fetch ThreatScore and Severity in parallel
const [threatScoreResponse, severityResponse] = await Promise.all([
getThreatScore({
filters: {
provider_id: providerId,
include: "provider",
},
}),
getFindingsBySeverity({
filters: {
"filter[provider_id]": providerId,
"filter[status]": "FAIL",
},
}),
]);
// Extract ThreatScore data
// When filtering by a single provider, the API returns an array with one item (not aggregated)
const threatScoreData = threatScoreResponse?.data?.[0]?.attributes;
const overallScore = threatScoreData?.overall_score
? parseFloat(threatScoreData.overall_score)
: null;
const failedFindings = threatScoreData?.failed_findings ?? 0;
// Extract Severity data
const severityData = severityResponse?.data?.attributes ?? null;
return {
providerId,
providerType,
providerName,
overallScore,
failedFindings,
severity: severityData,
};
}
/**
* Fetches risk data for multiple providers in parallel.
* Used by the Risk Plot SSR component.
*/
export async function getProvidersRiskData(
providers: ProviderProps[],
): Promise<ProviderRiskData[]> {
const riskDataPromises = providers.map((provider) =>
getProviderRiskData(provider),
);
return Promise.all(riskDataPromises);
}
@@ -0,0 +1,58 @@
// Risk Plot Types
// Data structures for the Risk Plot scatter chart
import type { BarDataPoint } from "@/components/graphs/types";
/**
* Represents a single point in the Risk Plot scatter chart.
* Each point represents a provider/account with its risk metrics.
*/
export interface RiskPlotPoint {
/** ThreatScore (0-100 scale, higher = better) */
x: number;
/** Total failed findings count */
y: number;
/** Provider type display name (AWS, Azure, Google, etc.) */
provider: string;
/** Provider alias or UID (account identifier) */
name: string;
/** Provider ID for filtering/navigation */
providerId: string;
/** Severity breakdown for the horizontal bar chart */
severityData?: BarDataPoint[];
}
/**
* Raw data from the API combined for a single provider.
* Used internally before transformation to RiskPlotPoint.
*/
export interface ProviderRiskData {
providerId: string;
providerType: string;
providerName: string;
/** ThreatScore overall_score (0-100 scale) */
overallScore: number | null;
/** Failed findings from ThreatScore snapshot */
failedFindings: number;
/** Severity breakdown */
severity: {
critical: number;
high: number;
medium: number;
low: number;
informational: number;
} | null;
}
/**
* Response structure for risk plot data fetching.
*/
export interface RiskPlotDataResponse {
points: RiskPlotPoint[];
/** Providers that have no data or no completed scans */
providersWithoutData: Array<{
id: string;
name: string;
type: string;
}>;
}
@@ -1,5 +1,9 @@
"use server";
import {
getDateFromForTimeRange,
type TimeRange,
} from "@/app/(prowler)/_new-overview/severity-over-time/_constants/time-range.constants";
import { apiBaseUrl, getAuthHeaders } from "@/lib";
import { handleApiResponse } from "@/lib/server-actions-helper";
@@ -9,20 +13,6 @@ import {
FindingsSeverityOverTimeResponse,
} from "./types";
const TIME_RANGE_VALUES = {
FIVE_DAYS: "5D",
ONE_WEEK: "1W",
ONE_MONTH: "1M",
} as const;
type TimeRange = (typeof TIME_RANGE_VALUES)[keyof typeof TIME_RANGE_VALUES];
const TIME_RANGE_DAYS: Record<TimeRange, number> = {
"5D": 5,
"1W": 7,
"1M": 30,
};
export type SeverityTrendsResult =
| { status: "success"; data: AdaptedSeverityTrendsResponse }
| { status: "empty" }
@@ -76,21 +66,9 @@ export const getSeverityTrendsByTimeRange = async ({
timeRange: TimeRange;
filters?: Record<string, string | string[] | undefined>;
}): Promise<SeverityTrendsResult> => {
const days = TIME_RANGE_DAYS[timeRange];
if (!days) {
console.error("Invalid time range provided");
return { status: "error" };
}
const endDate = new Date();
const startDate = new Date(endDate.getTime() - days * 24 * 60 * 60 * 1000);
const dateFrom = startDate.toISOString().split("T")[0];
const dateFilters = {
...filters,
date_from: dateFrom,
"filter[date_from]": getDateFromForTimeRange(timeRange),
};
return getFindingsSeverityTrends({ filters: dateFilters });
+4 -2
@@ -3,9 +3,10 @@ import "@/styles/globals.css";
import { GoogleTagManager } from "@next/third-parties/google";
import { Metadata, Viewport } from "next";
import { redirect } from "next/navigation";
import { ReactNode } from "react";
import { auth } from "@/auth.config";
import { Toaster } from "@/components/ui";
import { NavigationProgress, Toaster } from "@/components/ui";
import { fontSans } from "@/config/fonts";
import { siteConfig } from "@/config/site";
import { cn } from "@/lib";
@@ -33,7 +34,7 @@ export const viewport: Viewport = {
export default async function RootLayout({
children,
}: {
children: React.ReactNode;
children: ReactNode;
}) {
const session = await auth();
@@ -52,6 +53,7 @@ export default async function RootLayout({
)}
>
<Providers themeProps={{ attribute: "class", defaultTheme: "dark" }}>
<NavigationProgress />
{children}
<Toaster />
<GoogleTagManager
@@ -11,15 +11,15 @@ export const GRAPH_TABS = [
id: "threat-map",
label: "Threat Map",
},
{
id: "risk-plot",
label: "Risk Plot",
},
// TODO: Uncomment when ready to enable other tabs
// {
// id: "risk-radar",
// label: "Risk Radar",
// },
// {
// id: "risk-plot",
// label: "Risk Plot",
// },
] as const;
export type TabId = (typeof GRAPH_TABS)[number]["id"];
@@ -7,9 +7,9 @@ import { GraphsTabsClient } from "./_components/graphs-tabs-client";
import { GRAPH_TABS, type TabId } from "./_config/graphs-tabs-config";
import { FindingsViewSSR } from "./findings-view";
import { RiskPipelineViewSSR } from "./risk-pipeline-view/risk-pipeline-view.ssr";
import { RiskPlotSSR } from "./risk-plot/risk-plot.ssr";
import { ThreatMapViewSSR } from "./threat-map-view/threat-map-view.ssr";
// TODO: Uncomment when ready to enable other tabs
// import { RiskPlotView } from "./risk-plot/risk-plot-view";
// import { RiskRadarViewSSR } from "./risk-radar-view/risk-radar-view.ssr";
const LoadingFallback = () => (
@@ -25,9 +25,9 @@ const GRAPH_COMPONENTS: Record<TabId, GraphComponent> = {
findings: FindingsViewSSR as GraphComponent,
"risk-pipeline": RiskPipelineViewSSR as GraphComponent,
"threat-map": ThreatMapViewSSR as GraphComponent,
"risk-plot": RiskPlotSSR as GraphComponent,
// TODO: Uncomment when ready to enable other tabs
// "risk-radar": RiskRadarViewSSR as GraphComponent,
// "risk-plot": RiskPlotView as GraphComponent,
};
interface GraphsTabsWrapperProps {
@@ -1,9 +1,19 @@
"use client";
/**
* Risk Plot Client Component
*
* NOTE: This component uses CSS variables (var()) for Recharts styling.
* Recharts SVG-based components (Scatter, XAxis, YAxis, CartesianGrid, etc.)
* do not support Tailwind classes and require raw color values or CSS variables.
* This is a documented limitation of the Recharts library.
* @see https://recharts.org/en-US/api
*/
import { useRouter, useSearchParams } from "next/navigation";
import { useState } from "react";
import {
CartesianGrid,
Legend,
ResponsiveContainer,
Scatter,
ScatterChart,
@@ -12,6 +22,7 @@ import {
YAxis,
} from "recharts";
import type { RiskPlotPoint } from "@/actions/overview/risk-plot";
import { HorizontalBarChart } from "@/components/graphs/horizontal-bar-chart";
import { AlertPill } from "@/components/graphs/shared/alert-pill";
import { ChartLegend } from "@/components/graphs/shared/chart-legend";
@@ -19,69 +30,83 @@ import {
AXIS_FONT_SIZE,
CustomXAxisTick,
} from "@/components/graphs/shared/custom-axis-tick";
import { getSeverityColorByRiskScore } from "@/components/graphs/shared/utils";
import type { BarDataPoint } from "@/components/graphs/types";
import { mapProviderFiltersForFindings } from "@/lib/provider-helpers";
import { SEVERITY_FILTER_MAP } from "@/types/severities";
const PROVIDER_COLORS = {
AWS: "var(--color-bg-data-aws)",
Azure: "var(--color-bg-data-azure)",
Google: "var(--color-bg-data-gcp)",
};
// Threat Score colors (0-100 scale, higher = better)
const THREAT_COLORS = {
DANGER: "var(--bg-fail-primary)", // 0-30
WARNING: "var(--bg-warning-primary)", // 31-60
SUCCESS: "var(--bg-pass-primary)", // 61-100
} as const;
export interface ScatterPoint {
x: number;
y: number;
provider: string;
name: string;
severityData?: BarDataPoint[];
}
/**
* Get color based on ThreatScore (0-100 scale, higher = better)
*/
function getThreatScoreColor(score: number): string {
if (score > 60) return THREAT_COLORS.SUCCESS;
if (score > 30) return THREAT_COLORS.WARNING;
return THREAT_COLORS.DANGER;
}
// Provider colors from globals.css
const PROVIDER_COLORS: Record<string, string> = {
AWS: "var(--bg-data-aws)",
Azure: "var(--bg-data-azure)",
"Google Cloud": "var(--bg-data-gcp)",
Kubernetes: "var(--bg-data-kubernetes)",
"Microsoft 365": "var(--bg-data-m365)",
GitHub: "var(--bg-data-github)",
"MongoDB Atlas": "var(--bg-data-azure)",
"Infrastructure as Code": "var(--bg-data-kubernetes)",
"Oracle Cloud Infrastructure": "var(--bg-data-gcp)",
};
interface RiskPlotClientProps {
data: ScatterPoint[];
data: RiskPlotPoint[];
}
interface TooltipProps {
active?: boolean;
payload?: Array<{ payload: ScatterPoint }>;
payload?: Array<{ payload: RiskPlotPoint }>;
}
interface ScatterDotProps {
// Props that Recharts passes to the shape component
interface RechartsScatterDotProps {
cx: number;
cy: number;
payload: ScatterPoint;
selectedPoint: ScatterPoint | null;
onSelectPoint: (point: ScatterPoint) => void;
allData: ScatterPoint[];
payload: RiskPlotPoint;
}
interface LegendProps {
payload?: Array<{ value: string; color: string }>;
}
// Extended props for our custom scatter dot component
interface ScatterDotProps extends RechartsScatterDotProps {
selectedPoint: RiskPlotPoint | null;
onSelectPoint: (point: RiskPlotPoint) => void;
allData: RiskPlotPoint[];
selectedProvider: string | null;
}
const CustomTooltip = ({ active, payload }: TooltipProps) => {
if (active && payload && payload.length) {
const data = payload[0].payload;
const severityColor = getSeverityColorByRiskScore(data.x);
if (!active || !payload?.length) return null;
return (
<div className="border-border-neutral-tertiary bg-bg-neutral-tertiary pointer-events-none min-w-[200px] rounded-xl border p-3 shadow-lg">
<p className="text-text-neutral-primary mb-2 text-sm font-semibold">
{data.name}
</p>
<p className="text-text-neutral-secondary text-sm font-medium">
{/* Dynamic color from getSeverityColorByRiskScore - required inline style */}
<span style={{ color: severityColor, fontWeight: "bold" }}>
{data.x}
</span>{" "}
Risk Score
</p>
<div className="mt-2">
<AlertPill value={data.y} />
</div>
const { name, x, y } = payload[0].payload;
const scoreColor = getThreatScoreColor(x);
return (
<div className="border-border-neutral-tertiary bg-bg-neutral-tertiary pointer-events-none min-w-[200px] rounded-xl border p-3 shadow-lg">
<p className="text-text-neutral-primary mb-2 text-sm font-semibold">
{name}
</p>
<p className="text-text-neutral-secondary text-sm font-medium">
<span style={{ color: scoreColor, fontWeight: "bold" }}>{x}%</span>{" "}
Threat Score
</p>
<div className="mt-2">
<AlertPill value={y} />
</div>
);
}
return null;
</div>
);
};
const CustomScatterDot = ({
@@ -91,24 +116,31 @@ const CustomScatterDot = ({
selectedPoint,
onSelectPoint,
allData,
selectedProvider,
}: ScatterDotProps) => {
const isSelected = selectedPoint?.name === payload.name;
const size = isSelected ? 18 : 8;
const selectedColor = "var(--bg-button-primary)"; // emerald-400
const selectedColor = "var(--bg-button-primary)";
const fill = isSelected
? selectedColor
: PROVIDER_COLORS[payload.provider as keyof typeof PROVIDER_COLORS] ||
"var(--color-text-neutral-tertiary)";
: PROVIDER_COLORS[payload.provider] || "var(--color-text-neutral-tertiary)";
const isFaded =
selectedProvider !== null && payload.provider !== selectedProvider;
const handleClick = () => {
const fullDataItem = allData?.find(
(d: ScatterPoint) => d.name === payload.name,
);
const fullDataItem = allData?.find((d) => d.name === payload.name);
onSelectPoint?.(fullDataItem || payload);
};
return (
<g style={{ cursor: "pointer" }} onClick={handleClick}>
<g
style={{
cursor: "pointer",
opacity: isFaded ? 0.2 : 1,
transition: "opacity 0.2s",
}}
onClick={handleClick}
>
{isSelected && (
<>
<circle
@@ -143,60 +175,86 @@ const CustomScatterDot = ({
);
};
const CustomLegend = ({ payload }: LegendProps) => {
const items =
payload?.map((entry: { value: string; color: string }) => ({
label: entry.value,
color: entry.color,
})) || [];
return <ChartLegend items={items} />;
};
/**
* Factory function that creates a scatter dot shape component with closure over selection state.
* Recharts shape prop types the callback parameter as `unknown` due to its flexible API.
* We safely cast to RechartsScatterDotProps since we know the actual shape of props passed by Scatter.
* @see https://recharts.org/en-US/api/Scatter#shape
*/
function createScatterDotShape(
selectedPoint: ScatterPoint | null,
onSelectPoint: (point: ScatterPoint) => void,
allData: ScatterPoint[],
) {
const ScatterDotShape = (props: unknown) => {
const dotProps = props as Omit<
ScatterDotProps,
"selectedPoint" | "onSelectPoint" | "allData"
>;
return (
<CustomScatterDot
{...dotProps}
selectedPoint={selectedPoint}
onSelectPoint={onSelectPoint}
allData={allData}
/>
);
};
selectedPoint: RiskPlotPoint | null,
onSelectPoint: (point: RiskPlotPoint) => void,
allData: RiskPlotPoint[],
selectedProvider: string | null,
): (props: unknown) => React.JSX.Element {
const ScatterDotShape = (props: unknown) => (
<CustomScatterDot
{...(props as RechartsScatterDotProps)}
selectedPoint={selectedPoint}
onSelectPoint={onSelectPoint}
allData={allData}
selectedProvider={selectedProvider}
/>
);
ScatterDotShape.displayName = "ScatterDotShape";
return ScatterDotShape;
}
export function RiskPlotClient({ data }: RiskPlotClientProps) {
const [selectedPoint, setSelectedPoint] = useState<ScatterPoint | null>(null);
const router = useRouter();
const searchParams = useSearchParams();
const [selectedPoint, setSelectedPoint] = useState<RiskPlotPoint | null>(
null,
);
const [selectedProvider, setSelectedProvider] = useState<string | null>(null);
const dataByProvider = data.reduce(
// Group data by provider for separate Scatter series
const dataByProvider = data.reduce<Record<string, RiskPlotPoint[]>>(
(acc, point) => {
const provider = point.provider;
if (!acc[provider]) {
acc[provider] = [];
}
acc[provider].push(point);
(acc[point.provider] ??= []).push(point);
return acc;
},
{} as Record<string, typeof data>,
{},
);
const handleSelectPoint = (point: ScatterPoint) => {
if (selectedPoint?.name === point.name) {
setSelectedPoint(null);
} else {
setSelectedPoint(point);
const providers = Object.keys(dataByProvider);
const handleSelectPoint = (point: RiskPlotPoint) => {
setSelectedPoint((current) =>
current?.name === point.name ? null : point,
);
};
const handleProviderClick = (provider: string) => {
setSelectedProvider((current) => (current === provider ? null : provider));
};
const handleBarClick = (dataPoint: BarDataPoint) => {
if (!selectedPoint) return;
// Build the URL with current filters
const params = new URLSearchParams(searchParams.toString());
// Transform provider filters (provider_id__in -> provider__in)
mapProviderFiltersForFindings(params);
// Add severity filter
const severity = SEVERITY_FILTER_MAP[dataPoint.name];
if (severity) {
params.set("filter[severity__in]", severity);
}
// Add provider filter for the selected point
params.set("filter[provider__in]", selectedPoint.providerId);
// Add exclude muted findings filter
params.set("filter[muted]", "false");
// Filter by FAIL findings
params.set("filter[status__in]", "FAIL");
// Navigate to findings page
router.push(`/findings?${params.toString()}`);
};
return (
@@ -204,26 +262,14 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
<div className="flex flex-1 gap-12">
{/* Plot Section - in Card */}
<div className="flex basis-[70%] flex-col">
<div
className="flex flex-1 flex-col rounded-lg border p-4"
style={{
borderColor: "var(--border-neutral-primary)",
backgroundColor: "var(--bg-neutral-secondary)",
}}
>
<div className="border-border-neutral-primary bg-bg-neutral-secondary flex flex-1 flex-col rounded-lg border p-4">
<div className="mb-4">
<h3
className="text-lg font-semibold"
style={{ color: "var(--text-neutral-primary)" }}
>
<h3 className="text-text-neutral-primary text-lg font-semibold">
Risk Plot
</h3>
</div>
<div
className="relative w-full flex-1"
style={{ minHeight: "400px" }}
>
<div className="relative min-h-[400px] w-full flex-1">
<ResponsiveContainer width="100%" height="100%">
<ScatterChart
margin={{ top: 20, right: 30, bottom: 60, left: 60 }}
@@ -237,16 +283,16 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
<XAxis
type="number"
dataKey="x"
name="Risk Score"
name="Threat Score"
label={{
value: "Risk Score",
value: "Threat Score",
position: "bottom",
offset: 10,
fill: "var(--color-text-neutral-secondary)",
}}
tick={CustomXAxisTick}
tickLine={false}
domain={[0, 10]}
domain={[0, 100]}
axisLine={false}
/>
<YAxis
@@ -268,30 +314,43 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
axisLine={false}
/>
<Tooltip content={<CustomTooltip />} />
<Legend
content={<CustomLegend />}
wrapperStyle={{ paddingTop: "40px" }}
/>
{Object.entries(dataByProvider).map(([provider, points]) => (
<Scatter
key={provider}
name={provider}
data={points}
fill={
PROVIDER_COLORS[
provider as keyof typeof PROVIDER_COLORS
] || "var(--color-text-neutral-tertiary)"
PROVIDER_COLORS[provider] ||
"var(--color-text-neutral-tertiary)"
}
shape={createScatterDotShape(
selectedPoint,
handleSelectPoint,
data,
selectedProvider,
)}
/>
))}
</ScatterChart>
</ResponsiveContainer>
</div>
{/* Interactive Legend - below chart */}
<div className="mt-4 flex flex-col items-start gap-2">
<p className="text-text-neutral-tertiary pl-2 text-xs">
Click to filter by provider.
</p>
<ChartLegend
items={providers.map((p) => ({
label: p,
color:
PROVIDER_COLORS[p] || "var(--color-text-neutral-tertiary)",
dataKey: p,
}))}
selectedItem={selectedProvider}
onItemClick={handleProviderClick}
/>
</div>
</div>
</div>
@@ -300,28 +359,22 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
{selectedPoint && selectedPoint.severityData ? (
<div className="flex w-full flex-col">
<div className="mb-4">
<h4
className="text-base font-semibold"
style={{ color: "var(--text-neutral-primary)" }}
>
<h4 className="text-text-neutral-primary text-base font-semibold">
{selectedPoint.name}
</h4>
<p
className="text-xs"
style={{ color: "var(--text-neutral-tertiary)" }}
>
Risk Score: {selectedPoint.x} | Failed Findings:{" "}
<p className="text-text-neutral-tertiary text-xs">
Threat Score: {selectedPoint.x}% | Failed Findings:{" "}
{selectedPoint.y}
</p>
</div>
<HorizontalBarChart data={selectedPoint.severityData} />
<HorizontalBarChart
data={selectedPoint.severityData}
onBarClick={handleBarClick}
/>
</div>
) : (
<div className="flex w-full items-center justify-center text-center">
<p
className="text-sm"
style={{ color: "var(--text-neutral-tertiary)" }}
>
<p className="text-text-neutral-tertiary text-sm">
Select a point on the plot to view details
</p>
</div>
@@ -1,191 +0,0 @@
import { RiskPlotClient, type ScatterPoint } from "./risk-plot-client";
// Mock data - Risk Score (0-10) vs Failed Findings count
const mockScatterData: ScatterPoint[] = [
{
x: 9.2,
y: 1456,
provider: "AWS",
name: "Amazon RDS",
severityData: [
{ name: "Critical", value: 456 },
{ name: "High", value: 600 },
{ name: "Medium", value: 250 },
{ name: "Low", value: 120 },
{ name: "Info", value: 30 },
],
},
{
x: 8.5,
y: 892,
provider: "AWS",
name: "Amazon EC2",
severityData: [
{ name: "Critical", value: 280 },
{ name: "High", value: 350 },
{ name: "Medium", value: 180 },
{ name: "Low", value: 70 },
{ name: "Info", value: 12 },
],
},
{
x: 7.1,
y: 445,
provider: "AWS",
name: "Amazon S3",
severityData: [
{ name: "Critical", value: 140 },
{ name: "High", value: 180 },
{ name: "Medium", value: 90 },
{ name: "Low", value: 30 },
{ name: "Info", value: 5 },
],
},
{
x: 6.3,
y: 678,
provider: "AWS",
name: "AWS Lambda",
severityData: [
{ name: "Critical", value: 214 },
{ name: "High", value: 270 },
{ name: "Medium", value: 135 },
{ name: "Low", value: 54 },
{ name: "Info", value: 5 },
],
},
{
x: 4.2,
y: 156,
provider: "AWS",
name: "AWS Backup",
severityData: [
{ name: "Critical", value: 49 },
{ name: "High", value: 62 },
{ name: "Medium", value: 31 },
{ name: "Low", value: 12 },
{ name: "Info", value: 2 },
],
},
{
x: 8.8,
y: 1023,
provider: "Azure",
name: "Azure SQL Database",
severityData: [
{ name: "Critical", value: 323 },
{ name: "High", value: 410 },
{ name: "Medium", value: 205 },
{ name: "Low", value: 82 },
{ name: "Info", value: 3 },
],
},
{
x: 7.9,
y: 834,
provider: "Azure",
name: "Azure Virtual Machines",
severityData: [
{ name: "Critical", value: 263 },
{ name: "High", value: 334 },
{ name: "Medium", value: 167 },
{ name: "Low", value: 67 },
{ name: "Info", value: 3 },
],
},
{
x: 6.4,
y: 567,
provider: "Azure",
name: "Azure Storage",
severityData: [
{ name: "Critical", value: 179 },
{ name: "High", value: 227 },
{ name: "Medium", value: 113 },
{ name: "Low", value: 45 },
{ name: "Info", value: 3 },
],
},
{
x: 5.1,
y: 289,
provider: "Azure",
name: "Azure Key Vault",
severityData: [
{ name: "Critical", value: 91 },
{ name: "High", value: 115 },
{ name: "Medium", value: 58 },
{ name: "Low", value: 23 },
{ name: "Info", value: 2 },
],
},
{
x: 7.6,
y: 712,
provider: "Google",
name: "Cloud SQL",
severityData: [
{ name: "Critical", value: 225 },
{ name: "High", value: 285 },
{ name: "Medium", value: 142 },
{ name: "Low", value: 57 },
{ name: "Info", value: 3 },
],
},
{
x: 6.9,
y: 623,
provider: "Google",
name: "Compute Engine",
severityData: [
{ name: "Critical", value: 197 },
{ name: "High", value: 249 },
{ name: "Medium", value: 124 },
{ name: "Low", value: 50 },
{ name: "Info", value: 3 },
],
},
{
x: 5.8,
y: 412,
provider: "Google",
name: "Cloud Storage",
severityData: [
{ name: "Critical", value: 130 },
{ name: "High", value: 165 },
{ name: "Medium", value: 82 },
{ name: "Low", value: 33 },
{ name: "Info", value: 2 },
],
},
{
x: 4.5,
y: 198,
provider: "Google",
name: "Cloud Run",
severityData: [
{ name: "Critical", value: 63 },
{ name: "High", value: 79 },
{ name: "Medium", value: 39 },
{ name: "Low", value: 16 },
{ name: "Info", value: 1 },
],
},
{
x: 8.9,
y: 945,
provider: "AWS",
name: "Amazon RDS Aurora",
severityData: [
{ name: "Critical", value: 299 },
{ name: "High", value: 378 },
{ name: "Medium", value: 189 },
{ name: "Low", value: 76 },
{ name: "Info", value: 3 },
],
},
];
export function RiskPlotView() {
return <RiskPlotClient data={mockScatterData} />;
}
@@ -0,0 +1,91 @@
import { Info } from "lucide-react";
import {
adaptToRiskPlotData,
getProvidersRiskData,
} from "@/actions/overview/risk-plot";
import { getProviders } from "@/actions/providers";
import { SearchParamsProps } from "@/types";
import { pickFilterParams } from "../../_lib/filter-params";
import { RiskPlotClient } from "./risk-plot-client";
export async function RiskPlotSSR({
searchParams,
}: {
searchParams: SearchParamsProps;
}) {
const filters = pickFilterParams(searchParams);
const providerTypeFilter = filters["filter[provider_type__in]"];
const providerIdFilter = filters["filter[provider_id__in]"];
// Fetch all providers
const providersListResponse = await getProviders({ pageSize: 200 });
const allProviders = providersListResponse?.data || [];
// Filter providers based on search params
let filteredProviders = allProviders;
if (providerIdFilter) {
// Filter by specific provider IDs
const selectedIds = String(providerIdFilter)
.split(",")
.map((id) => id.trim());
filteredProviders = allProviders.filter((p) => selectedIds.includes(p.id));
} else if (providerTypeFilter) {
// Filter by provider types
const selectedTypes = String(providerTypeFilter)
.split(",")
.map((t) => t.trim().toLowerCase());
filteredProviders = allProviders.filter((p) =>
selectedTypes.includes(p.attributes.provider.toLowerCase()),
);
}
// No providers to show
if (filteredProviders.length === 0) {
return (
<div className="flex h-[460px] w-full items-center justify-center">
<div className="flex flex-col items-center gap-2 text-center">
<Info size={48} className="text-text-neutral-tertiary" />
<p className="text-text-neutral-secondary text-sm">
No providers available for the selected filters
</p>
</div>
</div>
);
}
// Fetch risk data for all filtered providers in parallel
const providersRiskData = await getProvidersRiskData(filteredProviders);
// Transform to chart format
const { points, providersWithoutData } =
adaptToRiskPlotData(providersRiskData);
// No data available
if (points.length === 0) {
return (
<div className="flex h-[460px] w-full items-center justify-center">
<div className="flex flex-col items-center gap-2 text-center">
<Info size={48} className="text-text-neutral-tertiary" />
<p className="text-text-neutral-secondary text-sm">
No risk data available for the selected providers
</p>
{providersWithoutData.length > 0 && (
<p className="text-text-neutral-tertiary text-xs">
{providersWithoutData.length} provider(s) have no completed scans
</p>
)}
</div>
</div>
);
}
return (
<div className="w-full flex-1 overflow-visible">
<RiskPlotClient data={points} />
</div>
);
}
@@ -13,6 +13,7 @@ import {
SeverityLevel,
} from "@/types/severities";
import { DEFAULT_TIME_RANGE } from "../_constants/time-range.constants";
import { type TimeRange, TimeRangeSelector } from "./time-range-selector";
interface FindingSeverityOverTimeProps {
@@ -24,7 +25,7 @@ export const FindingSeverityOverTime = ({
}: FindingSeverityOverTimeProps) => {
const router = useRouter();
const searchParams = useSearchParams();
const [timeRange, setTimeRange] = useState<TimeRange>("5D");
const [timeRange, setTimeRange] = useState<TimeRange>(DEFAULT_TIME_RANGE);
const [data, setData] = useState<LineDataPoint[]>(initialData);
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<string | null>(null);
@@ -2,14 +2,12 @@
import { cn } from "@/lib/utils";
const TIME_RANGE_OPTIONS = {
FIVE_DAYS: "5D",
ONE_WEEK: "1W",
ONE_MONTH: "1M",
} as const;
import {
TIME_RANGE_OPTIONS,
type TimeRange,
} from "../_constants/time-range.constants";
export type TimeRange =
(typeof TIME_RANGE_OPTIONS)[keyof typeof TIME_RANGE_OPTIONS];
export type { TimeRange };
interface TimeRangeSelectorProps {
value: TimeRange;
@@ -0,0 +1 @@
export * from "./time-range.constants";
@@ -0,0 +1,23 @@
export const TIME_RANGE_OPTIONS = {
FIVE_DAYS: "5D",
ONE_WEEK: "1W",
ONE_MONTH: "1M",
} as const;
export type TimeRange =
(typeof TIME_RANGE_OPTIONS)[keyof typeof TIME_RANGE_OPTIONS];
export const TIME_RANGE_DAYS: Record<TimeRange, number> = {
"5D": 5,
"1W": 7,
"1M": 30,
};
export const DEFAULT_TIME_RANGE: TimeRange = "5D";
export const getDateFromForTimeRange = (timeRange: TimeRange): string => {
const days = TIME_RANGE_DAYS[timeRange];
const date = new Date();
date.setDate(date.getDate() - days);
return date.toISOString().split("T")[0];
};
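Worked example of the helper above. Note that toISOString() is UTC-based, so near local midnight the result can differ by a day from the local calendar date:

// Assuming "today" is 2025-12-09 (UTC):
getDateFromForTimeRange("5D"); // "2025-12-04"
getDateFromForTimeRange("1W"); // "2025-12-02"
getDateFromForTimeRange("1M"); // "2025-11-09"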
@@ -1,10 +1,11 @@
import { getFindingsSeverityTrends } from "@/actions/overview/severity-trends";
import { getSeverityTrendsByTimeRange } from "@/actions/overview/severity-trends";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/shadcn";
import { pickFilterParams } from "../_lib/filter-params";
import { SSRComponentProps } from "../_types";
import { FindingSeverityOverTime } from "./_components/finding-severity-over-time";
import { FindingSeverityOverTimeSkeleton } from "./_components/finding-severity-over-time.skeleton";
import { DEFAULT_TIME_RANGE } from "./_constants/time-range.constants";
export { FindingSeverityOverTimeSkeleton };
@@ -25,7 +26,11 @@ export const FindingSeverityOverTimeSSR = async ({
searchParams,
}: SSRComponentProps) => {
const filters = pickFilterParams(searchParams);
const result = await getFindingsSeverityTrends({ filters });
const result = await getSeverityTrendsByTimeRange({
timeRange: DEFAULT_TIME_RANGE,
filters,
});
if (result.status === "error") {
return <EmptyState message="Failed to load severity trends data" />;
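A plausible shape for getSeverityTrendsByTimeRange, given the constants above. This is a hypothetical sketch: the real action lives in @/actions/overview/severity-trends, and the "filter[date_from]" key is an assumption, not taken from the diff:

// Hypothetical sketch; the real action may use a different filter key.
import { getFindingsSeverityTrends } from "@/actions/overview/severity-trends";
import {
  getDateFromForTimeRange,
  type TimeRange,
} from "../_constants/time-range.constants";

export async function getSeverityTrendsByTimeRangeSketch({
  timeRange,
  filters,
}: {
  timeRange: TimeRange;
  filters: Record<string, string>;
}) {
  return getFindingsSeverityTrends({
    filters: {
      // Default date_from derived from the selected range...
      "filter[date_from]": getDateFromForTimeRange(timeRange),
      // ...while explicit filters from the URL still take precedence.
      ...filters,
    },
  });
}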
+4 -2
View File
@@ -2,10 +2,11 @@ import "@/styles/globals.css";
import * as Sentry from "@sentry/nextjs";
import { Metadata, Viewport } from "next";
import React from "react";
import { ReactNode } from "react";
import { getProviders } from "@/actions/providers";
import MainLayout from "@/components/ui/main-layout/main-layout";
import { NavigationProgress } from "@/components/ui/navigation-progress";
import { Toaster } from "@/components/ui/toast";
import { fontSans } from "@/config/fonts";
import { siteConfig } from "@/config/site";
@@ -38,7 +39,7 @@ export const viewport: Viewport = {
export default async function RootLayout({
children,
}: {
children: React.ReactNode;
children: ReactNode;
}) {
const providersData = await getProviders({ page: 1, pageSize: 1 });
const hasProviders = !!(providersData?.data && providersData.data.length > 0);
@@ -54,6 +55,7 @@ export default async function RootLayout({
)}
>
<Providers themeProps={{ attribute: "class", defaultTheme: "dark" }}>
<NavigationProgress />
<StoreInitializer values={{ hasProviders }} />
<MainLayout>{children}</MainLayout>
<Toaster />
+2 -2
View File
@@ -32,11 +32,11 @@ export function AlertPill({
>
<AlertTriangle
size={iconSize}
style={{ color: "var(--color-text-text-error)" }}
style={{ color: "var(--color-text-error-primary)" }}
/>
<span
className={cn(textSizeClass, "font-semibold")}
style={{ color: "var(--color-text-text-error)" }}
style={{ color: "var(--color-text-error-primary)" }}
>
{value}
</span>
+1
View File
@@ -12,6 +12,7 @@ export * from "./feedback-banner/feedback-banner";
export * from "./headers/navigation-header";
export * from "./label/Label";
export * from "./main-layout/main-layout";
export * from "./navigation-progress";
export * from "./select";
export * from "./sidebar";
export * from "./toast";
@@ -0,0 +1,7 @@
export { NavigationProgress } from "./navigation-progress";
export {
cancelProgress,
completeProgress,
startProgress,
useNavigationProgress,
} from "./use-navigation-progress";
@@ -0,0 +1,42 @@
"use client";
import { useEffect, useState } from "react";
import { cn } from "@/lib";
import { useNavigationProgress } from "./use-navigation-progress";
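// Keeps the bar mounted briefly after loading ends, mirroring the 200ms
// progress reset in completeProgress (see use-navigation-progress).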
const HIDE_DELAY_MS = 200;
export function NavigationProgress() {
const { isLoading, progress } = useNavigationProgress();
const [visible, setVisible] = useState(false);
useEffect(() => {
if (isLoading) {
setVisible(true);
return;
}
const timeout = setTimeout(() => setVisible(false), HIDE_DELAY_MS);
return () => clearTimeout(timeout);
}, [isLoading]);
if (!visible) return null;
return (
<div
className="fixed top-0 left-0 z-[99999] h-[3px] w-full"
role="progressbar"
aria-valuenow={progress}
aria-valuemin={0}
aria-valuemax={100}
aria-label="Page loading progress"
>
<div
className={cn(
"bg-button-primary h-full transition-all duration-200 ease-out",
isLoading && "shadow-progress-glow",
)}
style={{ width: `${progress}%` }}
/>
</div>
);
}
@@ -0,0 +1,106 @@
"use client";
import { usePathname, useSearchParams } from "next/navigation";
import { useEffect, useSyncExternalStore } from "react";
interface ProgressState {
isLoading: boolean;
progress: number;
}
// Global state
let state: ProgressState = { isLoading: false, progress: 0 };
const listeners = new Set<() => void>();
let progressInterval: ReturnType<typeof setInterval> | null = null;
let timeoutId: ReturnType<typeof setTimeout> | null = null;
// Cached server snapshot to avoid infinite loop with useSyncExternalStore
const SERVER_SNAPSHOT: ProgressState = { isLoading: false, progress: 0 };
function notify() {
listeners.forEach((listener) => listener());
}
function setState(newState: ProgressState) {
state = newState;
notify();
}
function clearTimers() {
if (progressInterval) {
clearInterval(progressInterval);
progressInterval = null;
}
if (timeoutId) {
clearTimeout(timeoutId);
timeoutId = null;
}
}
/**
* Start the progress bar animation.
* Progress increases quickly at first, then slows down as it approaches 90%.
*/
export function startProgress() {
clearTimers();
setState({ isLoading: true, progress: 0 });
progressInterval = setInterval(() => {
if (state.progress < 90) {
const increment = (90 - state.progress) * 0.1;
setState({
...state,
progress: Math.min(90, state.progress + increment),
});
}
}, 100);
}
/**
* Complete the progress bar animation.
* Jumps to 100% and then hides after a brief delay.
*/
export function completeProgress() {
clearTimers();
setState({ isLoading: false, progress: 100 });
timeoutId = setTimeout(() => {
setState({ isLoading: false, progress: 0 });
timeoutId = null;
}, 200);
}
/**
* Cancel the progress bar immediately without animation.
*/
export function cancelProgress() {
clearTimers();
setState({ isLoading: false, progress: 0 });
}
/**
* Hook to access progress bar state.
* Automatically completes progress when URL changes.
*/
export function useNavigationProgress() {
const pathname = usePathname();
const searchParams = useSearchParams();
const currentState = useSyncExternalStore(
(listener) => {
listeners.add(listener);
return () => listeners.delete(listener);
},
() => state,
() => SERVER_SNAPSHOT,
);
// Complete progress when URL changes (only if currently loading)
useEffect(() => {
if (state.isLoading) {
completeProgress();
}
}, [pathname, searchParams]);
return currentState;
}
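With the 10% easing in startProgress, progress after n ticks of 100ms follows 90·(1 − 0.9ⁿ): roughly 59% after one second, asymptotically approaching 90%, and only reaching 100% when completeProgress fires. Because the store is module-global, the bar can also be driven manually around any async operation; a minimal sketch:

// Minimal sketch: drive the bar around an arbitrary async task.
import {
  cancelProgress,
  completeProgress,
  startProgress,
} from "@/components/ui/navigation-progress";

export async function withProgress<T>(task: () => Promise<T>): Promise<T> {
  startProgress();
  try {
    const result = await task();
    completeProgress(); // jump to 100%, then reset after 200ms
    return result;
  } catch (err) {
    cancelProgress(); // hide immediately on failure
    throw err;
  }
}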
+44
View File
@@ -0,0 +1,44 @@
/**
* Next.js Client Instrumentation
*
* This file runs on the client before React hydration.
* Used to set up navigation progress tracking.
*
* @see https://nextjs.org/docs/app/api-reference/file-conventions/instrumentation-client
*/
import {
cancelProgress,
startProgress,
} from "@/components/ui/navigation-progress/use-navigation-progress";
const NAVIGATION_TYPE = {
PUSH: "push",
REPLACE: "replace",
TRAVERSE: "traverse",
} as const;
type NavigationType = (typeof NAVIGATION_TYPE)[keyof typeof NAVIGATION_TYPE];
function getCurrentUrl(): string {
return window.location.pathname + window.location.search;
}
/**
* Called by Next.js when router navigation begins.
* Triggers the navigation progress bar.
*/
export function onRouterTransitionStart(
url: string,
_navigationType: NavigationType,
) {
const currentUrl = getCurrentUrl();
if (url === currentUrl) {
// Same URL - cancel any ongoing progress
cancelProgress();
} else {
// Different URL - start progress
startProgress();
}
}
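Illustrative call traces for the handler above, assuming the browser is currently at /findings?page=1 (URLs are hypothetical; Next.js invokes this hook itself on router transitions):

onRouterTransitionStart("/findings?page=1", "push"); // same URL → cancelProgress()
onRouterTransitionStart("/overview", "push"); // different URL → startProgress()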
+9
View File
@@ -75,6 +75,9 @@
/* Chart Dots */
--chart-dots: var(--color-neutral-200);
/* Progress Bar */
--shadow-progress-glow: 0 0 10px var(--bg-button-primary), 0 0 5px var(--bg-button-primary);
}
/* ===== DARK THEME ===== */
@@ -144,6 +147,9 @@
/* Chart Dots */
--chart-dots: var(--text-neutral-primary);
/* Progress Bar */
--shadow-progress-glow: 0 0 10px var(--bg-button-primary), 0 0 5px var(--bg-button-primary);
}
/* ===== TAILWIND THEME MAPPINGS ===== */
@@ -211,6 +217,9 @@
--color-bg-warning: var(--bg-warning-primary);
--color-bg-fail: var(--bg-fail-primary);
--color-bg-fail-secondary: var(--bg-fail-secondary);
/* Shadows */
--shadow-progress-glow: var(--shadow-progress-glow);
}
/* ===== CONTAINER UTILITY ===== */