Compare commits

...

19 Commits

Author SHA1 Message Date
Alan Buscaglia 2b0cadabf7 chore(ci): update UI E2E tests workflow for cloud environments
- Rename workflow to 'UI - E2E Cloud Tests'
- Add support for dev, stg, and pro environments
- Add workflow_run trigger after API/UI deployments
- Add workflow_dispatch for manual environment selection
- Add Tailscale setup for VPN access to non-prod environments
- Add deployment verification steps for STG and PRO
- Update environment variables for cloud-based testing
- Remove local docker-compose setup in favor of cloud APIs
2025-12-10 10:15:42 +01:00
Pedro Martín d786bb4440 fix(compliance): make unique requirements IDs for ISO27001 2013 - AWS (#9488) 2025-12-10 09:54:05 +01:00
KonstGolfi 9424289416 feat(compliance): add RBI Framework for Azure (#8822)
Co-authored-by: pedrooot <pedromarting3@gmail.com>
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2025-12-10 09:24:35 +01:00
Pedro Martín 3cbb6175a5 feat(compliance): add SOC2 Azure Processing Integrity requirements (#9463) 2025-12-10 08:53:08 +01:00
Pedro Martín 438deef3f8 feat(compliance): add SOC2 GCP Processing Integrity requirements (#9464) 2025-12-10 08:45:53 +01:00
Pedro Martín 1cdf4e65b2 feat(compliance): add SOC2 AWS Processing Integrity requirements (#9462) 2025-12-10 08:41:56 +01:00
Andoni Alonso dbdd02ebd1 fix(docs): solve broken link (#9493) 2025-12-10 08:09:25 +01:00
Pedro Martín d264f3daff fix(deps): install alibabacloud missing dep (#9487) 2025-12-09 17:18:32 +01:00
Hugo Pereira Brito 01fe379b55 fix: remove incorrect threat-detection category from checks (#9489) 2025-12-09 17:11:09 +01:00
Pedro Martín 50286846e0 fix(ui): show Top Failed Requirements for compliances without section hierarchy (#9471)
Co-authored-by: Alan Buscaglia <gentlemanprogramming@gmail.com>
2025-12-09 16:28:47 +01:00
Rubén De la Torre Vico 20ed8b3d2d fix: MCP findings tools errors (#9477) 2025-12-09 15:16:55 +01:00
Alan Buscaglia 45cc6e8b85 fix(ui): improve overview charts UX and consistency (#9484) 2025-12-09 13:33:41 +01:00
Hugo Pereira Brito 962c64eae5 chore: execute tests for only needed aws services (#9468) 2025-12-09 11:06:07 +01:00
César Arroba 7b56f0640f chore(github): fix release messages (#9459) 2025-12-09 10:06:55 +01:00
Alan Buscaglia 49c75cc418 fix(ui): add default date_from filter for severity over time endpoint (#9472) 2025-12-05 17:55:04 +01:00
Alan Buscaglia 56bca7c104 feat(ui): implement Risk Plot component with interactive legend and navigation (#9469) 2025-12-05 14:03:58 +01:00
Rubén De la Torre Vico faaa172b86 chore(aws): enhance metadata for macie service (#9265)
Co-authored-by: HugoPBrito <hugopbrit@gmail.com>
Co-authored-by: Hugo Pereira Brito <101209179+HugoPBrito@users.noreply.github.com>
2025-12-05 12:03:13 +01:00
Alan Buscaglia 219ce0ba89 feat(ui): add navigation progress bar for better UX during page transitions (#9465) 2025-12-05 12:01:00 +01:00
Adrián Peña 2170e5fe12 feat(api): add findings severity timeseries endpoint (#9363) 2025-12-05 11:19:37 +01:00
82 changed files with 3506 additions and 1069 deletions
+61 -33
@@ -48,8 +48,34 @@ jobs:
id: set-short-sha
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
container-build-push:
notify-release-started:
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: setup
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: API
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
container-build-push:
needs: [setup, notify-release-started]
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
runs-on: ${{ matrix.runner }}
strategy:
matrix:
@@ -78,21 +104,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Notify container push started
id: slack-notification-started
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: API
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
- name: Build and push API container for ${{ matrix.arch }}
id: container-push
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
@@ -106,23 +117,6 @@ jobs:
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
- name: Notify container push completed
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
COMPONENT: API
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.container-push.outcome }}
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
# Create and push multi-architecture manifest
create-manifest:
needs: [setup, container-build-push]
@@ -169,6 +163,40 @@ jobs:
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
echo "Cleanup completed"
notify-release-completed:
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: [setup, notify-release-started, container-build-push, create-manifest]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
run: |
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
echo "outcome=success" >> $GITHUB_OUTPUT
else
echo "outcome=failure" >> $GITHUB_OUTPUT
fi
- name: Notify container push completed
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
COMPONENT: API
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.outcome.outputs.outcome }}
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
trigger-deployment:
if: github.event_name == 'push'
needs: [setup, container-build-push]
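Note: the diff above replaces the per-architecture Slack steps with dedicated notify-release-started / notify-release-completed jobs, handing the Slack message timestamp between them as the message-ts job output. Because the timestamp now lives in a job output rather than in a step inside the matrix job, a single Slack message covers both architecture builds instead of one message per matrix leg. The slack-notification composite action itself is not shown in this diff; as a rough illustration of the post-then-update pattern it relies on, here is a minimal Python sketch using slack_sdk — illustrative only, not the action's actual implementation:

# Minimal sketch of the Slack post-then-update flow the workflow relies on.
# Assumes the slack_sdk package; the message texts are illustrative only.
import os

from slack_sdk import WebClient

client = WebClient(token=os.environ["SLACK_BOT_TOKEN"])
channel = os.environ["SLACK_CHANNEL_ID"]

# "notify-release-started": post the initial message and keep its timestamp,
# which the workflow exposes as the message-ts job output.
started = client.chat_postMessage(channel=channel, text="API release started")
message_ts = started["ts"]

# "notify-release-completed": update that same message in place, using the
# timestamp handed over via needs.notify-release-started.outputs.message-ts.
client.chat_update(channel=channel, ts=message_ts, text="API release completed")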
+61 -33
@@ -47,8 +47,34 @@ jobs:
id: set-short-sha
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
container-build-push:
notify-release-started:
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: setup
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: MCP
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
container-build-push:
needs: [setup, notify-release-started]
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
runs-on: ${{ matrix.runner }}
strategy:
matrix:
@@ -76,21 +102,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Notify container push started
id: slack-notification-started
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: MCP
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
- name: Build and push MCP container for ${{ matrix.arch }}
id: container-push
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
@@ -112,23 +123,6 @@ jobs:
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
- name: Notify container push completed
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
COMPONENT: MCP
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.container-push.outcome }}
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
# Create and push multi-architecture manifest
create-manifest:
needs: [setup, container-build-push]
@@ -175,6 +169,40 @@ jobs:
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
echo "Cleanup completed"
notify-release-completed:
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: [setup, notify-release-started, container-build-push, create-manifest]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
run: |
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
echo "outcome=success" >> $GITHUB_OUTPUT
else
echo "outcome=failure" >> $GITHUB_OUTPUT
fi
- name: Notify container push completed
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
COMPONENT: MCP
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.outcome.outputs.outcome }}
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
trigger-deployment:
if: github.event_name == 'push'
needs: [setup, container-build-push]
+104 -78
@@ -50,30 +50,15 @@ env:
AWS_REGION: us-east-1
jobs:
container-build-push:
setup:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ${{ matrix.runner }}
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
arch: amd64
- platform: linux/arm64
runner: ubuntu-24.04-arm
arch: arm64
timeout-minutes: 45
permissions:
contents: read
packages: write
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
prowler_version: ${{ steps.get-prowler-version.outputs.prowler_version }}
prowler_version_major: ${{ steps.get-prowler-version.outputs.prowler_version_major }}
latest_tag: ${{ steps.get-prowler-version.outputs.latest_tag }}
stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
env:
POETRY_VIRTUALENVS_CREATE: 'false'
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
@@ -93,32 +78,24 @@ jobs:
run: |
PROWLER_VERSION="$(poetry version -s 2>/dev/null)"
echo "prowler_version=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"
echo "PROWLER_VERSION=${PROWLER_VERSION}" >> "${GITHUB_ENV}"
# Extract major version
PROWLER_VERSION_MAJOR="${PROWLER_VERSION%%.*}"
echo "prowler_version_major=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_OUTPUT}"
echo "PROWLER_VERSION_MAJOR=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_ENV}"
# Set version-specific tags
case ${PROWLER_VERSION_MAJOR} in
3)
echo "LATEST_TAG=v3-latest" >> "${GITHUB_ENV}"
echo "STABLE_TAG=v3-stable" >> "${GITHUB_ENV}"
echo "latest_tag=v3-latest" >> "${GITHUB_OUTPUT}"
echo "stable_tag=v3-stable" >> "${GITHUB_OUTPUT}"
echo "✓ Prowler v3 detected - tags: v3-latest, v3-stable"
;;
4)
echo "LATEST_TAG=v4-latest" >> "${GITHUB_ENV}"
echo "STABLE_TAG=v4-stable" >> "${GITHUB_ENV}"
echo "latest_tag=v4-latest" >> "${GITHUB_OUTPUT}"
echo "stable_tag=v4-stable" >> "${GITHUB_OUTPUT}"
echo "✓ Prowler v4 detected - tags: v4-latest, v4-stable"
;;
5)
echo "LATEST_TAG=latest" >> "${GITHUB_ENV}"
echo "STABLE_TAG=stable" >> "${GITHUB_ENV}"
echo "latest_tag=latest" >> "${GITHUB_OUTPUT}"
echo "stable_tag=stable" >> "${GITHUB_OUTPUT}"
echo "✓ Prowler v5 detected - tags: latest, stable"
@@ -129,6 +106,53 @@ jobs:
;;
esac
notify-release-started:
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: setup
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: SDK
RELEASE_TAG: ${{ needs.setup.outputs.prowler_version }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
container-build-push:
needs: [setup, notify-release-started]
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
runs-on: ${{ matrix.runner }}
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
arch: amd64
- platform: linux/arm64
runner: ubuntu-24.04-arm
arch: arm64
timeout-minutes: 45
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Login to DockerHub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
@@ -147,21 +171,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Notify container push started
id: slack-notification-started
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: SDK
RELEASE_TAG: ${{ env.PROWLER_VERSION }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
- name: Build and push SDK container for ${{ matrix.arch }}
id: container-push
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
@@ -172,30 +181,13 @@ jobs:
push: true
platforms: ${{ matrix.platform }}
tags: |
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}-${{ matrix.arch }}
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-${{ matrix.arch }}
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
- name: Notify container push completed
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
COMPONENT: SDK
RELEASE_TAG: ${{ env.PROWLER_VERSION }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.container-push.outcome }}
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
# Create and push multi-architecture manifest
create-manifest:
needs: [container-build-push]
needs: [setup, container-build-push]
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
@@ -222,24 +214,24 @@ jobs:
if: github.event_name == 'push'
run: |
docker buildx imagetools create \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64 \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64
- name: Create and push manifests for release event
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
run: |
docker buildx imagetools create \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.prowler_version }} \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.stable_tag }} \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.prowler_version }} \
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.stable_tag }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.prowler_version }} \
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.stable_tag }} \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.prowler_version }} \
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.stable_tag }} \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64 \
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64
- name: Install regctl
if: always()
@@ -249,13 +241,47 @@ jobs:
if: always()
run: |
echo "Cleaning up intermediate tags..."
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64" || true
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64" || true
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64" || true
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64" || true
echo "Cleanup completed"
notify-release-completed:
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: [setup, notify-release-started, container-build-push, create-manifest]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
run: |
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
echo "outcome=success" >> $GITHUB_OUTPUT
else
echo "outcome=failure" >> $GITHUB_OUTPUT
fi
- name: Notify container push completed
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
COMPONENT: SDK
RELEASE_TAG: ${{ needs.setup.outputs.prowler_version }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.outcome.outputs.outcome }}
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
dispatch-v3-deployment:
if: needs.container-build-push.outputs.prowler_version_major == '3'
needs: container-build-push
if: needs.setup.outputs.prowler_version_major == '3'
needs: [setup, container-build-push]
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
@@ -282,4 +308,4 @@ jobs:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
event-type: dispatch
client-payload: '{"version":"release","tag":"${{ needs.container-build-push.outputs.prowler_version }}"}'
client-payload: '{"version":"release","tag":"${{ needs.setup.outputs.prowler_version }}"}'
+102 -1
@@ -82,9 +82,110 @@ jobs:
./tests/**/aws/**
./poetry.lock
- name: Resolve AWS services under test
if: steps.changed-aws.outputs.any_changed == 'true'
id: aws-services
shell: bash
run: |
python3 <<'PY'
import os
from pathlib import Path
dependents = {
"acm": ["elb"],
"autoscaling": ["dynamodb"],
"awslambda": ["ec2", "inspector2"],
"backup": ["dynamodb", "ec2", "rds"],
"cloudfront": ["shield"],
"cloudtrail": ["awslambda", "cloudwatch"],
"cloudwatch": ["bedrock"],
"ec2": ["dlm", "dms", "elbv2", "emr", "inspector2", "rds", "redshift", "route53", "shield", "ssm"],
"ecr": ["inspector2"],
"elb": ["shield"],
"elbv2": ["shield"],
"globalaccelerator": ["shield"],
"iam": ["bedrock", "cloudtrail", "cloudwatch", "codebuild"],
"kafka": ["firehose"],
"kinesis": ["firehose"],
"kms": ["kafka"],
"organizations": ["iam", "servicecatalog"],
"route53": ["shield"],
"s3": ["bedrock", "cloudfront", "cloudtrail", "macie"],
"ssm": ["ec2"],
"vpc": ["awslambda", "ec2", "efs", "elasticache", "neptune", "networkfirewall", "rds", "redshift", "workspaces"],
"waf": ["elbv2"],
"wafv2": ["cognito", "elbv2"],
}
changed_raw = """${{ steps.changed-aws.outputs.all_changed_files }}"""
# all_changed_files is space-separated, not newline-separated
# Strip leading "./" if present for consistent path handling
changed_files = [Path(f.lstrip("./")) for f in changed_raw.split() if f]
services = set()
run_all = False
for path in changed_files:
path_str = path.as_posix()
parts = path.parts
if path_str.startswith("prowler/providers/aws/services/"):
if len(parts) > 4 and "." not in parts[4]:
services.add(parts[4])
else:
run_all = True
elif path_str.startswith("tests/providers/aws/services/"):
if len(parts) > 4 and "." not in parts[4]:
services.add(parts[4])
else:
run_all = True
elif path_str.startswith("prowler/providers/aws/") or path_str.startswith("tests/providers/aws/"):
run_all = True
# Expand with direct dependent services (one level only)
# We only test services that directly depend on the changed services,
# not transitive dependencies (services that depend on dependents)
original_services = set(services)
for svc in original_services:
for dep in dependents.get(svc, []):
services.add(dep)
if run_all or not services:
run_all = True
services = set()
service_paths = " ".join(sorted(f"tests/providers/aws/services/{svc}" for svc in services))
output_lines = [
f"run_all={'true' if run_all else 'false'}",
f"services={' '.join(sorted(services))}",
f"service_paths={service_paths}",
]
with open(os.environ["GITHUB_OUTPUT"], "a") as gh_out:
for line in output_lines:
gh_out.write(line + "\n")
print(f"AWS changed files (filtered): {changed_raw or 'none'}")
print(f"Run all AWS tests: {run_all}")
if services:
print(f"AWS service test paths: {service_paths}")
else:
print("AWS service test paths: none detected")
PY
- name: Run AWS tests
if: steps.changed-aws.outputs.any_changed == 'true'
run: poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
run: |
echo "AWS run_all=${{ steps.aws-services.outputs.run_all }}"
echo "AWS service_paths='${{ steps.aws-services.outputs.service_paths }}'"
if [ "${{ steps.aws-services.outputs.run_all }}" = "true" ]; then
poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
elif [ -z "${{ steps.aws-services.outputs.service_paths }}" ]; then
echo "No AWS service paths detected; skipping AWS tests."
else
poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml ${{ steps.aws-services.outputs.service_paths }}
fi
- name: Upload AWS coverage to Codecov
if: steps.changed-aws.outputs.any_changed == 'true'
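Note: the service-resolution script above expands the changed-service set by one level of direct dependents only; services that depend on a dependent are deliberately not pulled in. A standalone sketch of that expansion step, with an illustrative subset of the real dependents map:

# One-level dependent expansion, mirroring the workflow script above:
# services that directly depend on a changed service are tested too,
# but dependents-of-dependents are not added transitively.
dependents = {
    "ec2": ["inspector2", "shield"],  # illustrative subset of the real map
    "s3": ["cloudtrail", "macie"],
}

changed = {"s3"}
selected = set(changed)
for svc in changed:
    selected.update(dependents.get(svc, []))

# Prints ['cloudtrail', 'macie', 's3'] -- inspector2/shield stay out because
# nothing in `changed` touches ec2.
print(sorted(selected))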
+61 -33
@@ -50,8 +50,34 @@ jobs:
id: set-short-sha
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
container-build-push:
notify-release-started:
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: setup
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: UI
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
container-build-push:
needs: [setup, notify-release-started]
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
runs-on: ${{ matrix.runner }}
strategy:
matrix:
@@ -80,21 +106,6 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Notify container push started
id: slack-notification-started
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
COMPONENT: UI
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
- name: Build and push UI container for ${{ matrix.arch }}
id: container-push
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
@@ -111,23 +122,6 @@ jobs:
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
- name: Notify container push completed
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
COMPONENT: UI
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.container-push.outcome }}
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
# Create and push multi-architecture manifest
create-manifest:
needs: [setup, container-build-push]
@@ -174,6 +168,40 @@ jobs:
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
echo "Cleanup completed"
notify-release-completed:
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
needs: [setup, notify-release-started, container-build-push, create-manifest]
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
run: |
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
echo "outcome=success" >> $GITHUB_OUTPUT
else
echo "outcome=failure" >> $GITHUB_OUTPUT
fi
- name: Notify container push completed
uses: ./.github/actions/slack-notification
env:
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
COMPONENT: UI
RELEASE_TAG: ${{ env.RELEASE_TAG }}
GITHUB_SERVER_URL: ${{ github.server_url }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_RUN_ID: ${{ github.run_id }}
with:
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
step-outcome: ${{ steps.outcome.outputs.outcome }}
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
trigger-deployment:
if: github.event_name == 'push'
needs: [setup, container-build-push]
+210 -113
@@ -1,4 +1,4 @@
name: UI - E2E Tests
name: UI - E2E Cloud Tests
on:
pull_request:
@@ -6,125 +6,185 @@ on:
- master
- "v5.*"
paths:
- '.github/workflows/ui-e2e-tests.yml'
- 'ui/**'
- ".github/workflows/ui-e2e-tests.yml"
- "ui/**"
push:
branches:
- master
- "v5.*"
paths:
- ".github/workflows/ui-e2e-cloud-tests.yml"
- "ui/**"
workflow_run:
workflows:
- "API - Build, Push and Deploy"
- "UI - Build, Push and Deploy"
types: [completed]
branches: [master, v5.*]
workflow_dispatch:
inputs:
environment:
description: "Environment to test"
required: true
default: "dev"
type: choice
options:
- dev
- stg
- pro
permissions:
id-token: write
contents: read
actions: read
jobs:
e2e-tests:
if: github.repository == 'prowler-cloud/prowler'
if: github.repository == 'prowler-cloud/prowler-cloud'
runs-on: ubuntu-latest
env:
AUTH_SECRET: 'fallback-ci-secret-for-testing'
AUTH_TRUST_HOST: true
NEXTAUTH_URL: 'http://localhost:3000'
NEXT_PUBLIC_API_BASE_URL: 'http://localhost:8080/api/v1'
E2E_ADMIN_USER: ${{ secrets.E2E_ADMIN_USER }}
E2E_ADMIN_PASSWORD: ${{ secrets.E2E_ADMIN_PASSWORD }}
E2E_AWS_PROVIDER_ACCOUNT_ID: ${{ secrets.E2E_AWS_PROVIDER_ACCOUNT_ID }}
E2E_AWS_PROVIDER_ACCESS_KEY: ${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}
E2E_AWS_PROVIDER_SECRET_KEY: ${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}
E2E_AWS_PROVIDER_ROLE_ARN: ${{ secrets.E2E_AWS_PROVIDER_ROLE_ARN }}
E2E_AZURE_SUBSCRIPTION_ID: ${{ secrets.E2E_AZURE_SUBSCRIPTION_ID }}
E2E_AZURE_CLIENT_ID: ${{ secrets.E2E_AZURE_CLIENT_ID }}
E2E_AZURE_SECRET_ID: ${{ secrets.E2E_AZURE_SECRET_ID }}
E2E_AZURE_TENANT_ID: ${{ secrets.E2E_AZURE_TENANT_ID }}
E2E_M365_DOMAIN_ID: ${{ secrets.E2E_M365_DOMAIN_ID }}
E2E_M365_CLIENT_ID: ${{ secrets.E2E_M365_CLIENT_ID }}
E2E_M365_SECRET_ID: ${{ secrets.E2E_M365_SECRET_ID }}
E2E_M365_TENANT_ID: ${{ secrets.E2E_M365_TENANT_ID }}
E2E_M365_CERTIFICATE_CONTENT: ${{ secrets.E2E_M365_CERTIFICATE_CONTENT }}
E2E_KUBERNETES_CONTEXT: 'kind-kind'
E2E_KUBERNETES_KUBECONFIG_PATH: /home/runner/.kube/config
E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY: ${{ secrets.E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY }}
E2E_GCP_PROJECT_ID: ${{ secrets.E2E_GCP_PROJECT_ID }}
E2E_GITHUB_APP_ID: ${{ secrets.E2E_GITHUB_APP_ID }}
E2E_GITHUB_BASE64_APP_PRIVATE_KEY: ${{ secrets.E2E_GITHUB_BASE64_APP_PRIVATE_KEY }}
E2E_GITHUB_USERNAME: ${{ secrets.E2E_GITHUB_USERNAME }}
E2E_GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_PERSONAL_ACCESS_TOKEN }}
E2E_GITHUB_ORGANIZATION: ${{ secrets.E2E_GITHUB_ORGANIZATION }}
E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN }}
E2E_ORGANIZATION_ID: ${{ secrets.E2E_ORGANIZATION_ID }}
E2E_OCI_TENANCY_ID: ${{ secrets.E2E_OCI_TENANCY_ID }}
E2E_OCI_USER_ID: ${{ secrets.E2E_OCI_USER_ID }}
E2E_OCI_FINGERPRINT: ${{ secrets.E2E_OCI_FINGERPRINT }}
E2E_OCI_KEY_CONTENT: ${{ secrets.E2E_OCI_KEY_CONTENT }}
E2E_OCI_REGION: ${{ secrets.E2E_OCI_REGION }}
E2E_NEW_USER_PASSWORD: ${{ secrets.E2E_NEW_USER_PASSWORD }}
NEXTAUTH_URL: "http://localhost:3000"
AUTH_SECRET: "fallback-ci-secret-for-testing"
AUTH_TRUST_HOST: "true"
steps:
- name: Determine environment
id: env
run: |
if [[ "${{ github.event_name }}" == "pull_request" || "${{ github.event_name }}" == "push" ]]; then
echo "environment=dev" >> $GITHUB_OUTPUT
elif [[ "${{ github.event_name }}" == "workflow_run" && "${{ github.event.workflow_run.conclusion }}" == "success" && "${{ github.event.workflow_run.event }}" == "release" ]]; then
echo "environment=stg" >> $GITHUB_OUTPUT
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "environment=${{ github.event.inputs.environment }}" >> $GITHUB_OUTPUT
else
echo "Unknown trigger, skipping..."
exit 1
fi
- name: Set environment variables
id: vars
run: |
case "${{ steps.env.outputs.environment }}" in
"dev")
echo "api_url=https://api.dev.prowler.com/api/v1" >> $GITHUB_OUTPUT
echo "e2e_user_secret=DEV_E2E_USER" >> $GITHUB_OUTPUT
echo "e2e_password_secret=DEV_E2E_PASSWORD" >> $GITHUB_OUTPUT
echo "environment_name=DEV" >> $GITHUB_OUTPUT
;;
"stg")
echo "api_url=https://api.stg.prowler.com/api/v1" >> $GITHUB_OUTPUT
echo "e2e_user_secret=STG_E2E_USER" >> $GITHUB_OUTPUT
echo "e2e_password_secret=STG_E2E_PASSWORD" >> $GITHUB_OUTPUT
echo "environment_name=STG" >> $GITHUB_OUTPUT
;;
"pro")
echo "api_url=https://api.prowler.com/api/v1" >> $GITHUB_OUTPUT
echo "e2e_user_secret=PRO_E2E_USER" >> $GITHUB_OUTPUT
echo "e2e_password_secret=PRO_E2E_PASSWORD" >> $GITHUB_OUTPUT
echo "environment_name=PRO" >> $GITHUB_OUTPUT
;;
esac
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Create k8s Kind Cluster
uses: helm/kind-action@v1
- name: Environment info
env:
ENV_NAME: ${{ steps.vars.outputs.environment_name }}
API_URL: ${{ steps.vars.outputs.api_url }}
run: |
echo "Environment: $ENV_NAME"
echo "API URL: $API_URL"
echo "Workflow: ${{ github.workflow }}"
echo "Event: ${{ github.event_name }}"
echo "Started at: $(date)"
- name: Verify both STG deployments completed
if: steps.env.outputs.environment == 'stg'
env:
GH_TOKEN: ${{ github.token }}
run: |
echo "Verifying that both API and UI deployments completed successfully..."
# Get the latest runs for both workflows triggered by the same release
API_RUN=$(gh run list --workflow="API - Build, Push and Deploy" --event=release --limit=1 --json status,conclusion,createdAt --jq '.[0]')
API_STATUS=$(echo "$API_RUN" | jq -r '.status')
API_CONCLUSION=$(echo "$API_RUN" | jq -r '.conclusion')
UI_RUN=$(gh run list --workflow="UI - Build, Push and Deploy" --event=release --limit=1 --json status,conclusion,createdAt --jq '.[0]')
UI_STATUS=$(echo "$UI_RUN" | jq -r '.status')
UI_CONCLUSION=$(echo "$UI_RUN" | jq -r '.conclusion')
echo "API workflow - Status: $API_STATUS, Conclusion: $API_CONCLUSION"
echo "UI workflow - Status: $UI_STATUS, Conclusion: $UI_CONCLUSION"
# Verify both workflows completed successfully
if [[ "$API_STATUS" != "completed" || "$API_CONCLUSION" != "success" ]]; then
echo "API deployment not ready (Status: $API_STATUS, Conclusion: $API_CONCLUSION)"
exit 1
fi
if [[ "$UI_STATUS" != "completed" || "$UI_CONCLUSION" != "success" ]]; then
echo "UI deployment not ready (Status: $UI_STATUS, Conclusion: $UI_CONCLUSION)"
exit 1
fi
echo "Both API and UI deployments completed successfully for STG"
- name: Verify both PRO deployments completed
if: steps.env.outputs.environment == 'pro'
env:
GH_TOKEN: ${{ github.token }}
run: |
echo "Verifying that both API and UI deployments completed successfully..."
# Get the latest manual runs for both workflows
API_RUN=$(gh run list --workflow="API - Build, Push and Deploy" --event=workflow_dispatch --limit=1 --json status,conclusion,createdAt --jq '.[0]')
API_STATUS=$(echo "$API_RUN" | jq -r '.status')
API_CONCLUSION=$(echo "$API_RUN" | jq -r '.conclusion')
UI_RUN=$(gh run list --workflow="UI - Build, Push and Deploy" --event=workflow_dispatch --limit=1 --json status,conclusion,createdAt --jq '.[0]')
UI_STATUS=$(echo "$UI_RUN" | jq -r '.status')
UI_CONCLUSION=$(echo "$UI_RUN" | jq -r '.conclusion')
echo "API workflow - Status: $API_STATUS, Conclusion: $API_CONCLUSION"
echo "UI workflow - Status: $UI_STATUS, Conclusion: $UI_CONCLUSION"
# Verify both workflows completed successfully
if [[ "$API_STATUS" != "completed" || "$API_CONCLUSION" != "success" ]]; then
echo "API deployment not ready (Status: $API_STATUS, Conclusion: $API_CONCLUSION)"
exit 1
fi
if [[ "$UI_STATUS" != "completed" || "$UI_CONCLUSION" != "success" ]]; then
echo "UI deployment not ready (Status: $UI_STATUS, Conclusion: $UI_CONCLUSION)"
exit 1
fi
echo "Both API and UI deployments completed successfully for PRO"
- name: Setup Tailscale
if: steps.env.outputs.environment != 'pro'
uses: tailscale/github-action@84a3f23bb4d843bcf4da6cf824ec1be473daf4de # v3.2.3
with:
cluster_name: kind
- name: Modify kubeconfig
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
tags: tag:github-actions
- name: Verify API is accessible
env:
API_URL: ${{ steps.vars.outputs.api_url }}
ENV_NAME: ${{ steps.vars.outputs.environment_name }}
run: |
# Modify the kubeconfig to use the kind cluster server to https://kind-control-plane:6443
# from worker service into docker-compose.yml
kubectl config set-cluster kind-kind --server=https://kind-control-plane:6443
kubectl config view
- name: Add network kind to docker compose
run: |
# Add the network kind to the docker compose to interconnect to kind cluster
yq -i '.networks.kind.external = true' docker-compose.yml
# Add network kind to worker service and default network too
yq -i '.services.worker.networks = ["kind","default"]' docker-compose.yml
- name: Fix API data directory permissions
run: docker run --rm -v $(pwd)/_data/api:/data alpine chown -R 1000:1000 /data
- name: Add AWS credentials for testing AWS SDK Default Adding Provider
run: |
echo "Adding AWS credentials for testing AWS SDK Default Adding Provider..."
echo "AWS_ACCESS_KEY_ID=${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}" >> .env
echo "AWS_SECRET_ACCESS_KEY=${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}" >> .env
- name: Start API services
run: |
# Override docker-compose image tag to use latest instead of stable
# This overrides any PROWLER_API_VERSION set in .env file
export PROWLER_API_VERSION=latest
echo "Using PROWLER_API_VERSION=${PROWLER_API_VERSION}"
docker compose up -d api worker worker-beat
- name: Wait for API to be ready
run: |
echo "Waiting for prowler-api..."
timeout=150 # 5 minutes max
elapsed=0
while [ $elapsed -lt $timeout ]; do
if curl -s ${NEXT_PUBLIC_API_BASE_URL}/docs >/dev/null 2>&1; then
echo "Prowler API is ready!"
exit 0
fi
echo "Waiting for prowler-api... (${elapsed}s elapsed)"
sleep 5
elapsed=$((elapsed + 5))
done
echo "Timeout waiting for prowler-api to start"
exit 1
- name: Load database fixtures for E2E tests
run: |
docker compose exec -T api sh -c '
echo "Loading all fixtures from api/fixtures/dev/..."
for fixture in api/fixtures/dev/*.json; do
if [ -f "$fixture" ]; then
echo "Loading $fixture"
poetry run python manage.py loaddata "$fixture" --database admin
fi
done
echo "All database fixtures loaded successfully!"
'
echo "Checking $ENV_NAME API at $API_URL/docs..."
curl -f --connect-timeout 30 --max-time 60 ${API_URL}/docs
echo "$ENV_NAME API is accessible"
- name: Setup Node.js environment
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
with:
node-version: '20.x'
- name: Setup pnpm
uses: pnpm/action-setup@v4
node-version: "20.x"
- name: Install pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
with:
version: 10
version: 9
run_install: false
- name: Get pnpm store directory
shell: bash
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
@@ -137,6 +197,10 @@ jobs:
run: pnpm install --frozen-lockfile
- name: Build UI application
working-directory: ./ui
env:
NEXT_PUBLIC_API_BASE_URL: ${{ steps.vars.outputs.api_url }}
NEXT_PUBLIC_IS_CLOUD_ENV: "true"
CLOUD_API_BASE_URL: ${{ steps.vars.outputs.api_url }}
run: pnpm run build
- name: Cache Playwright browsers
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
@@ -152,17 +216,50 @@ jobs:
run: pnpm run test:e2e:install
- name: Run E2E tests
working-directory: ./ui
run: pnpm run test:e2e
env:
NEXT_PUBLIC_API_BASE_URL: ${{ steps.vars.outputs.api_url }}
NEXT_PUBLIC_IS_CLOUD_ENV: "true"
CLOUD_API_BASE_URL: ${{ steps.vars.outputs.api_url }}
E2E_USER: ${{ secrets[steps.vars.outputs.e2e_user_secret] }}
E2E_PASSWORD: ${{ secrets[steps.vars.outputs.e2e_password_secret] }}
E2E_ADMIN_USER: ${{ secrets.E2E_ADMIN_USER }}
E2E_ADMIN_PASSWORD: ${{ secrets.E2E_ADMIN_PASSWORD }}
E2E_AWS_PROVIDER_ACCOUNT_ID: ${{ secrets.E2E_AWS_PROVIDER_ACCOUNT_ID }}
E2E_AWS_PROVIDER_ACCESS_KEY: ${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}
E2E_AWS_PROVIDER_SECRET_KEY: ${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}
E2E_AWS_PROVIDER_ROLE_ARN: ${{ secrets.E2E_AWS_PROVIDER_ROLE_ARN }}
E2E_AZURE_SUBSCRIPTION_ID: ${{ secrets.E2E_AZURE_SUBSCRIPTION_ID }}
E2E_AZURE_CLIENT_ID: ${{ secrets.E2E_AZURE_CLIENT_ID }}
E2E_AZURE_SECRET_ID: ${{ secrets.E2E_AZURE_SECRET_ID }}
E2E_AZURE_TENANT_ID: ${{ secrets.E2E_AZURE_TENANT_ID }}
E2E_M365_DOMAIN_ID: ${{ secrets.E2E_M365_DOMAIN_ID }}
E2E_M365_CLIENT_ID: ${{ secrets.E2E_M365_CLIENT_ID }}
E2E_M365_SECRET_ID: ${{ secrets.E2E_M365_SECRET_ID }}
E2E_M365_TENANT_ID: ${{ secrets.E2E_M365_TENANT_ID }}
E2E_M365_CERTIFICATE_CONTENT: ${{ secrets.E2E_M365_CERTIFICATE_CONTENT }}
E2E_KUBERNETES_CONTEXT: "kind-kind"
E2E_KUBERNETES_KUBECONFIG_PATH: /home/runner/.kube/config
E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY: ${{ secrets.E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY }}
E2E_GCP_PROJECT_ID: ${{ secrets.E2E_GCP_PROJECT_ID }}
E2E_GITHUB_APP_ID: ${{ secrets.E2E_GITHUB_APP_ID }}
E2E_GITHUB_BASE64_APP_PRIVATE_KEY: ${{ secrets.E2E_GITHUB_BASE64_APP_PRIVATE_KEY }}
E2E_GITHUB_USERNAME: ${{ secrets.E2E_GITHUB_USERNAME }}
E2E_GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_PERSONAL_ACCESS_TOKEN }}
E2E_GITHUB_ORGANIZATION: ${{ secrets.E2E_GITHUB_ORGANIZATION }}
E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN }}
E2E_ORGANIZATION_ID: ${{ secrets.E2E_ORGANIZATION_ID }}
E2E_OCI_TENANCY_ID: ${{ secrets.E2E_OCI_TENANCY_ID }}
E2E_OCI_USER_ID: ${{ secrets.E2E_OCI_USER_ID }}
E2E_OCI_FINGERPRINT: ${{ secrets.E2E_OCI_FINGERPRINT }}
E2E_OCI_KEY_CONTENT: ${{ secrets.E2E_OCI_KEY_CONTENT }}
E2E_OCI_REGION: ${{ secrets.E2E_OCI_REGION }}
E2E_NEW_USER_PASSWORD: ${{ secrets.E2E_NEW_USER_PASSWORD }}
run: pnpm run test:e2e-cloud
- name: Upload test reports
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: failure()
if: always()
with:
name: playwright-report
name: playwright-report-${{ steps.env.outputs.environment }}-${{ github.run_number }}
path: ui/playwright-report/
retention-days: 30
- name: Cleanup services
if: always()
run: |
echo "Shutting down services..."
docker compose down -v || true
echo "Cleanup completed"
+1
@@ -6,6 +6,7 @@ All notable changes to the **Prowler API** are documented in this file.
### Added
- New endpoint to retrieve an overview of the attack surfaces [(#9309)](https://github.com/prowler-cloud/prowler/pull/9309)
- New endpoint `GET /api/v1/overviews/findings_severity/timeseries` to retrieve daily aggregated findings by severity level [(#9363)](https://github.com/prowler-cloud/prowler/pull/9363)
- Lighthouse AI support for Amazon Bedrock API key [(#9343)](https://github.com/prowler-cloud/prowler/pull/9343)
- Exception handler for provider deletions during scans [(#9414)](https://github.com/prowler-cloud/prowler/pull/9414)
- Support to use admin credentials through the read replica database [(#9440)](https://github.com/prowler-cloud/prowler/pull/9440)
+5
@@ -6065,6 +6065,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"},
@@ -6073,6 +6074,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"},
@@ -6081,6 +6083,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"},
@@ -6089,6 +6092,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"},
@@ -6097,6 +6101,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"},
{file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"},
+63
@@ -25,6 +25,7 @@ from api.db_utils import (
from api.models import (
AttackSurfaceOverview,
ComplianceRequirementOverview,
DailySeveritySummary,
Finding,
Integration,
Invitation,
@@ -795,6 +796,68 @@ class ScanSummaryFilter(FilterSet):
}
class DailySeveritySummaryFilter(FilterSet):
"""Filter for findings_severity/timeseries endpoint."""
MAX_DATE_RANGE_DAYS = 365
provider_id = UUIDFilter(field_name="provider_id", lookup_expr="exact")
provider_id__in = UUIDInFilter(field_name="provider_id", lookup_expr="in")
provider_type = ChoiceFilter(
field_name="provider__provider", choices=Provider.ProviderChoices.choices
)
provider_type__in = ChoiceInFilter(
field_name="provider__provider", choices=Provider.ProviderChoices.choices
)
date_from = DateFilter(method="filter_noop")
date_to = DateFilter(method="filter_noop")
class Meta:
model = DailySeveritySummary
fields = ["provider_id"]
def filter_noop(self, queryset, name, value):
return queryset
def filter_queryset(self, queryset):
if not self.data.get("date_from"):
raise ValidationError(
[
{
"detail": "This query parameter is required.",
"status": "400",
"source": {"pointer": "filter[date_from]"},
"code": "required",
}
]
)
today = date.today()
date_from = self.form.cleaned_data.get("date_from")
date_to = min(self.form.cleaned_data.get("date_to") or today, today)
if (date_to - date_from).days > self.MAX_DATE_RANGE_DAYS:
raise ValidationError(
[
{
"detail": f"Date range cannot exceed {self.MAX_DATE_RANGE_DAYS} days.",
"status": "400",
"source": {"pointer": "filter[date_from]"},
"code": "invalid",
}
]
)
# View access
self.request._date_from = date_from
self.request._date_to = date_to
# Apply date filter (only lte for fill-forward logic)
queryset = queryset.filter(date__lte=date_to)
return super().filter_queryset(queryset)
class ScanSummarySeverityFilter(ScanSummaryFilter):
"""Filter for findings_severity ScanSummary endpoint - includes status filters"""
@@ -0,0 +1,96 @@
# Generated by Django 5.1.14 on 2025-12-03 13:38
import uuid
import django.db.models.deletion
from django.db import migrations, models
import api.rls
class Migration(migrations.Migration):
dependencies = [
("api", "0060_attack_surface_overview"),
]
operations = [
migrations.CreateModel(
name="DailySeveritySummary",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("date", models.DateField()),
("critical", models.IntegerField(default=0)),
("high", models.IntegerField(default=0)),
("medium", models.IntegerField(default=0)),
("low", models.IntegerField(default=0)),
("informational", models.IntegerField(default=0)),
("muted", models.IntegerField(default=0)),
(
"provider",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="daily_severity_summaries",
related_query_name="daily_severity_summary",
to="api.provider",
),
),
(
"scan",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="daily_severity_summaries",
related_query_name="daily_severity_summary",
to="api.scan",
),
),
(
"tenant",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="api.tenant",
),
),
],
options={
"db_table": "daily_severity_summaries",
"abstract": False,
},
),
migrations.AddIndex(
model_name="dailyseveritysummary",
index=models.Index(
fields=["tenant_id", "id"],
name="dss_tenant_id_idx",
),
),
migrations.AddIndex(
model_name="dailyseveritysummary",
index=models.Index(
fields=["tenant_id", "provider_id"],
name="dss_tenant_provider_idx",
),
),
migrations.AddConstraint(
model_name="dailyseveritysummary",
constraint=models.UniqueConstraint(
fields=("tenant_id", "provider", "date"),
name="unique_daily_severity_summary",
),
),
migrations.AddConstraint(
model_name="dailyseveritysummary",
constraint=api.rls.RowLevelSecurityConstraint(
"tenant_id",
name="rls_on_dailyseveritysummary",
statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
),
),
]
+59
View File
@@ -1500,6 +1500,65 @@ class ScanSummary(RowLevelSecurityProtectedModel):
resource_name = "scan-summaries"
class DailySeveritySummary(RowLevelSecurityProtectedModel):
"""
Pre-aggregated daily severity counts per provider.
Used by findings_severity/timeseries endpoint for efficient queries.
"""
objects = ActiveProviderManager()
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
date = models.DateField()
provider = models.ForeignKey(
Provider,
on_delete=models.CASCADE,
related_name="daily_severity_summaries",
related_query_name="daily_severity_summary",
)
scan = models.ForeignKey(
Scan,
on_delete=models.CASCADE,
related_name="daily_severity_summaries",
related_query_name="daily_severity_summary",
)
# Aggregated fail counts by severity
critical = models.IntegerField(default=0)
high = models.IntegerField(default=0)
medium = models.IntegerField(default=0)
low = models.IntegerField(default=0)
informational = models.IntegerField(default=0)
muted = models.IntegerField(default=0)
class Meta(RowLevelSecurityProtectedModel.Meta):
db_table = "daily_severity_summaries"
constraints = [
models.UniqueConstraint(
fields=("tenant_id", "provider", "date"),
name="unique_daily_severity_summary",
),
RowLevelSecurityConstraint(
field="tenant_id",
name="rls_on_%(class)s",
statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
),
]
indexes = [
models.Index(
fields=["tenant_id", "id"],
name="dss_tenant_id_idx",
),
models.Index(
fields=["tenant_id", "provider_id"],
name="dss_tenant_provider_idx",
),
]
class Integration(RowLevelSecurityProtectedModel):
class IntegrationChoices(models.TextChoices):
AMAZON_S3 = "amazon_s3", _("Amazon S3")
+202
View File
@@ -4940,6 +4940,154 @@ paths:
schema:
$ref: '#/components/schemas/OverviewSeverityResponse'
description: ''
/api/v1/overviews/findings_severity/timeseries:
get:
operationId: overviews_findings_severity_timeseries_retrieve
description: Retrieve daily aggregated findings data grouped by severity levels
over a date range. Returns one data point per day with counts of failed findings
by severity (critical, high, medium, low, informational) and muted findings.
Days without scans are filled forward with the most recent known values. Use
date_from (required) and date_to filters to specify the range.
summary: Get findings severity data over time
parameters:
- in: query
name: fields[findings-severity-timeseries]
schema:
type: array
items:
type: string
enum:
- id
- critical
- high
- medium
- low
- informational
- muted
- scan_ids
description: The endpoint returns only the requested fields in the response, on a
per-type basis, when a fields[TYPE] query parameter is included.
explode: false
- in: query
name: filter[date_from]
schema:
type: string
- in: query
name: filter[date_to]
schema:
type: string
- in: query
name: filter[provider_id]
schema:
type: string
format: uuid
- in: query
name: filter[provider_id__in]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[provider_type]
schema:
type: string
enum:
- aws
- azure
- gcp
- github
- iac
- kubernetes
- m365
- mongodbatlas
- oraclecloud
description: |-
* `aws` - AWS
* `azure` - Azure
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
- in: query
name: filter[provider_type__in]
schema:
type: array
items:
type: string
enum:
- aws
- azure
- gcp
- github
- iac
- kubernetes
- m365
- mongodbatlas
- oraclecloud
description: |-
Multiple values may be separated by commas.
* `aws` - AWS
* `azure` - Azure
* `gcp` - GCP
* `kubernetes` - Kubernetes
* `m365` - M365
* `github` - GitHub
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
explode: false
style: form
- name: filter[search]
required: false
in: query
description: A search term.
schema:
type: string
- name: sort
required: false
in: query
description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
schema:
type: array
items:
type: string
enum:
- id
- -id
- critical
- -critical
- high
- -high
- medium
- -medium
- low
- -low
- informational
- -informational
- muted
- -muted
- scan_ids
- -scan_ids
explode: false
tags:
- Overview
security:
- JWT or API Key: []
responses:
'200':
content:
application/vnd.api+json:
schema:
$ref: '#/components/schemas/FindingsSeverityOverTimeResponse'
description: ''
/api/v1/overviews/providers:
get:
operationId: overviews_providers_retrieve
@@ -11099,6 +11247,60 @@ components:
$ref: '#/components/schemas/Finding'
required:
- data
FindingsSeverityOverTime:
type: object
required:
- type
- id
additionalProperties: false
properties:
type:
type: string
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
member is used to describe resource objects that share common attributes
and relationships.
enum:
- findings-severity-over-time
id: {}
attributes:
type: object
properties:
id:
type: string
format: date
critical:
type: integer
high:
type: integer
medium:
type: integer
low:
type: integer
informational:
type: integer
muted:
type: integer
scan_ids:
type: array
items:
type: string
format: uuid
required:
- id
- critical
- high
- medium
- low
- informational
- muted
- scan_ids
FindingsSeverityOverTimeResponse:
type: object
properties:
data:
$ref: '#/components/schemas/FindingsSeverityOverTime'
required:
- data
Integration:
type: object
required:
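Read together with the path definition above, a round trip against the endpoint could look like the sketch below. The host and token are placeholders, and the commented response is a hand-built illustration of the FindingsSeverityOverTime shape, reusing the fill-forward example from the tests further down.

import requests  # any HTTP client works; requests is used for brevity

resp = requests.get(
    "https://api.example.com/api/v1/overviews/findings_severity/timeseries",
    params={"filter[date_from]": "2024-01-01", "filter[date_to]": "2024-01-03"},
    headers={
        "Authorization": "Bearer <token>",
        "Accept": "application/vnd.api+json",
    },
)
# One resource per day; 2024-01-02 repeats 2024-01-01 (fill-forward):
# {"data": [
#   {"type": "findings-severity-over-time", "id": "2024-01-01",
#    "attributes": {"critical": 10, "high": 20, "medium": 30, "low": 40,
#                   "informational": 50, "muted": 5, "scan_ids": ["..."]}},
#   ...]}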
+266 -1
View File
@@ -3,7 +3,7 @@ import io
import json
import os
import tempfile
from datetime import datetime, timedelta, timezone
from datetime import date, datetime, timedelta, timezone
from decimal import Decimal
from pathlib import Path
from types import SimpleNamespace
@@ -38,6 +38,7 @@ from api.models import (
AttackSurfaceOverview,
ComplianceOverviewSummary,
ComplianceRequirementOverview,
DailySeveritySummary,
Finding,
Integration,
Invitation,
@@ -6984,6 +6985,270 @@ class TestOverviewViewSet:
assert combined_attributes["medium"] == 4
assert combined_attributes["critical"] == 3
def test_overview_findings_severity_timeseries_requires_date_from(
self, authenticated_client
):
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries")
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "date_from" in response.json()["errors"][0]["source"]["pointer"]
def test_overview_findings_severity_timeseries_invalid_date_format(
self, authenticated_client
):
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{"filter[date_from]": "invalid-date"},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "Enter a valid date." in response.json()["errors"][0]["detail"]
def test_overview_findings_severity_timeseries_empty_data(
self, authenticated_client
):
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{
"filter[date_from]": "2024-01-01",
"filter[date_to]": "2024-01-03",
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
# Should return 3 days with fill-forward (all zeros since no data)
assert len(data) == 3
for item in data:
assert item["attributes"]["critical"] == 0
assert item["attributes"]["high"] == 0
assert item["attributes"]["medium"] == 0
assert item["attributes"]["low"] == 0
assert item["attributes"]["informational"] == 0
assert item["attributes"]["muted"] == 0
assert item["attributes"]["scan_ids"] == []
def test_overview_findings_severity_timeseries_with_data(
self, authenticated_client, tenants_fixture, providers_fixture
):
tenant = tenants_fixture[0]
provider1, provider2, *_ = providers_fixture
# Create scan for day 1
scan1 = Scan.objects.create(
name="severity-over-time-scan-1",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
)
# Create scan for day 3
scan3 = Scan.objects.create(
name="severity-over-time-scan-3",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 1, 3, 12, 0, 0, tzinfo=timezone.utc),
)
# Create DailySeveritySummary for day 1
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider1,
scan=scan1,
date=date(2024, 1, 1),
critical=10,
high=20,
medium=30,
low=40,
informational=50,
muted=5,
)
# Create DailySeveritySummary for day 3
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider1,
scan=scan3,
date=date(2024, 1, 3),
critical=15,
high=25,
medium=35,
low=45,
informational=55,
muted=10,
)
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{
"filter[date_from]": "2024-01-01",
"filter[date_to]": "2024-01-03",
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert len(data) == 3
# Day 1 - actual data (id is the date)
assert data[0]["id"] == "2024-01-01"
assert data[0]["attributes"]["critical"] == 10
assert data[0]["attributes"]["high"] == 20
assert data[0]["attributes"]["scan_ids"] == [str(scan1.id)]
# Day 2 - fill forward from day 1 (no data for this day)
assert data[1]["id"] == "2024-01-02"
assert data[1]["attributes"]["critical"] == 10
assert data[1]["attributes"]["high"] == 20
assert data[1]["attributes"]["scan_ids"] == [str(scan1.id)]
# Day 3 - actual data
assert data[2]["id"] == "2024-01-03"
assert data[2]["attributes"]["critical"] == 15
assert data[2]["attributes"]["high"] == 25
assert data[2]["attributes"]["scan_ids"] == [str(scan3.id)]
def test_overview_findings_severity_timeseries_aggregates_providers(
self, authenticated_client, tenants_fixture, providers_fixture
):
tenant = tenants_fixture[0]
provider1, provider2, *_ = providers_fixture
# Same day, different providers
scan1 = Scan.objects.create(
name="severity-over-time-scan-p1",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 2, 1, 12, 0, 0, tzinfo=timezone.utc),
)
scan2 = Scan.objects.create(
name="severity-over-time-scan-p2",
provider=provider2,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 2, 1, 14, 0, 0, tzinfo=timezone.utc),
)
# Create DailySeveritySummary for provider1
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider1,
scan=scan1,
date=date(2024, 2, 1),
critical=10,
high=20,
medium=30,
low=40,
informational=50,
muted=5,
)
# Create DailySeveritySummary for provider2
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider2,
scan=scan2,
date=date(2024, 2, 1),
critical=5,
high=10,
medium=15,
low=20,
informational=25,
muted=3,
)
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{
"filter[date_from]": "2024-02-01",
"filter[date_to]": "2024-02-01",
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert len(data) == 1
# Should aggregate both providers
assert data[0]["attributes"]["critical"] == 15 # 10 + 5
assert data[0]["attributes"]["high"] == 30 # 20 + 10
assert data[0]["attributes"]["medium"] == 45 # 30 + 15
assert data[0]["attributes"]["low"] == 60 # 40 + 20
assert data[0]["attributes"]["informational"] == 75 # 50 + 25
assert data[0]["attributes"]["muted"] == 8 # 5 + 3
# scan_ids should contain both scans (order may vary)
assert set(data[0]["attributes"]["scan_ids"]) == {str(scan1.id), str(scan2.id)}
def test_overview_findings_severity_timeseries_provider_filter(
self, authenticated_client, tenants_fixture, providers_fixture
):
tenant = tenants_fixture[0]
provider1, provider2, *_ = providers_fixture
scan1 = Scan.objects.create(
name="severity-over-time-filter-scan-p1",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 3, 1, 12, 0, 0, tzinfo=timezone.utc),
)
scan2 = Scan.objects.create(
name="severity-over-time-filter-scan-p2",
provider=provider2,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
completed_at=datetime(2024, 3, 1, 14, 0, 0, tzinfo=timezone.utc),
)
# Provider 1 - critical=100
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider1,
scan=scan1,
date=date(2024, 3, 1),
critical=100,
high=0,
medium=0,
low=0,
informational=0,
muted=0,
)
# Provider 2 - critical=50
DailySeveritySummary.objects.create(
tenant=tenant,
provider=provider2,
scan=scan2,
date=date(2024, 3, 1),
critical=50,
high=0,
medium=0,
low=0,
informational=0,
muted=0,
)
# Filter by provider1 only
response = authenticated_client.get(
reverse("overview-findings_severity_timeseries"),
{
"filter[date_from]": "2024-03-01",
"filter[date_to]": "2024-03-01",
"filter[provider_id]": str(provider1.id),
},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert len(data) == 1
assert data[0]["attributes"]["critical"] == 100 # Only provider1
assert data[0]["attributes"]["scan_ids"] == [str(scan1.id)]
def test_overview_attack_surface_no_data(self, authenticated_client):
response = authenticated_client.get(reverse("overview-attack-surface"))
assert response.status_code == status.HTTP_200_OK
+16
View File
@@ -2204,6 +2204,22 @@ class OverviewSeveritySerializer(BaseSerializerV1):
resource_name = "findings-severity-overview"
class FindingsSeverityOverTimeSerializer(BaseSerializerV1):
"""Serializer for daily findings severity trend data."""
id = serializers.DateField(source="date")
critical = serializers.IntegerField()
high = serializers.IntegerField()
medium = serializers.IntegerField()
low = serializers.IntegerField()
informational = serializers.IntegerField()
muted = serializers.IntegerField()
scan_ids = serializers.ListField(child=serializers.UUIDField())
class JSONAPIMeta:
resource_name = "findings-severity-over-time"
class OverviewServiceSerializer(BaseSerializerV1):
id = serializers.CharField(source="service")
total = serializers.IntegerField()
+137 -1
View File
@@ -102,6 +102,7 @@ from api.filters import (
AttackSurfaceOverviewFilter,
ComplianceOverviewFilter,
CustomDjangoFilterBackend,
DailySeveritySummaryFilter,
FindingFilter,
IntegrationFilter,
IntegrationJiraFindingsFilter,
@@ -131,6 +132,7 @@ from api.models import (
AttackSurfaceOverview,
ComplianceOverviewSummary,
ComplianceRequirementOverview,
DailySeveritySummary,
Finding,
Integration,
Invitation,
@@ -184,6 +186,7 @@ from api.v1.serializers import (
FindingDynamicFilterSerializer,
FindingMetadataSerializer,
FindingSerializer,
FindingsSeverityOverTimeSerializer,
IntegrationCreateSerializer,
IntegrationJiraDispatchSerializer,
IntegrationSerializer,
@@ -4009,6 +4012,16 @@ class ComplianceOverviewViewSet(BaseRLSViewSet, TaskManagementMixin):
),
filters=True,
),
findings_severity_timeseries=extend_schema(
summary="Get findings severity data over time",
description=(
"Retrieve daily aggregated findings data grouped by severity levels over a date range. "
"Returns one data point per day with counts of failed findings by severity (critical, high, "
"medium, low, informational) and muted findings. Days without scans are filled forward with "
"the most recent known values. Use date_from (required) and date_to filters to specify the range."
),
filters=True,
),
attack_surface=extend_schema(
summary="Get attack surface overview",
description="Retrieve aggregated attack surface metrics from latest completed scans per provider.",
@@ -4057,7 +4070,16 @@ class OverviewViewSet(BaseRLSViewSet):
if not role.unlimited_visibility:
self.allowed_providers = providers
return ScanSummary.all_objects.filter(tenant_id=self.request.tenant_id)
tenant_id = self.request.tenant_id
# Return appropriate queryset per action
if self.action == "findings_severity_timeseries":
qs = DailySeveritySummary.objects.filter(tenant_id=tenant_id)
if hasattr(self, "allowed_providers"):
qs = qs.filter(provider_id__in=self.allowed_providers)
return qs
return ScanSummary.all_objects.filter(tenant_id=tenant_id)
def get_serializer_class(self):
if self.action == "providers":
@@ -4068,6 +4090,8 @@ class OverviewViewSet(BaseRLSViewSet):
return OverviewFindingSerializer
elif self.action == "findings_severity":
return OverviewSeveritySerializer
elif self.action == "findings_severity_timeseries":
return FindingsSeverityOverTimeSerializer
elif self.action == "services":
return OverviewServiceSerializer
elif self.action == "regions":
@@ -4085,8 +4109,18 @@ class OverviewViewSet(BaseRLSViewSet):
return ScanSummaryFilter
elif self.action == "findings_severity":
return ScanSummarySeverityFilter
elif self.action == "findings_severity_timeseries":
return DailySeveritySummaryFilter
return None
def filter_queryset(self, queryset):
# Skip OrderingFilter for findings_severity_timeseries (no inserted_at field)
if self.action == "findings_severity_timeseries":
return CustomDjangoFilterBackend().filter_queryset(
self.request, queryset, self
)
return super().filter_queryset(queryset)
@extend_schema(exclude=True)
def list(self, request, *args, **kwargs):
raise MethodNotAllowed(method="GET")
@@ -4363,6 +4397,108 @@ class OverviewViewSet(BaseRLSViewSet):
return Response(serializer.data, status=status.HTTP_200_OK)
@action(
detail=False,
methods=["get"],
url_path="findings_severity/timeseries",
url_name="findings_severity_timeseries",
)
def findings_severity_timeseries(self, request):
"""
Daily severity trends for charts. Uses DailySeveritySummary pre-aggregation.
Requires date_from filter.
"""
# Get queryset with RBAC, provider, and date filters applied
# Date validation is handled by DailySeveritySummaryFilter
daily_qs = self.filter_queryset(self.get_queryset())
date_from = request._date_from
date_to = request._date_to
if not daily_qs.exists():
# No data matches filters - return zeros
result = self._generate_zero_result(date_from, date_to)
serializer = self.get_serializer(result, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
# Fetch all data for fill-forward logic
daily_summaries = list(
daily_qs.order_by("provider_id", "-date").values(
"provider_id",
"scan_id",
"date",
"critical",
"high",
"medium",
"low",
"informational",
"muted",
)
)
if not daily_summaries:
result = self._generate_zero_result(date_from, date_to)
serializer = self.get_serializer(result, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
# Build provider_data: {provider_id: [(date, data), ...]} sorted by date desc
provider_data = defaultdict(list)
for summary in daily_summaries:
provider_data[summary["provider_id"]].append(summary)
# For each day, find the latest data per provider and sum values
result = []
current_date = date_from
while current_date <= date_to:
day_totals = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
"informational": 0,
"muted": 0,
}
day_scan_ids = []
for provider_id, summaries in provider_data.items():
# Find the latest data for this provider <= current_date
for summary in summaries: # Already sorted by date desc
if summary["date"] <= current_date:
day_totals["critical"] += summary["critical"] or 0
day_totals["high"] += summary["high"] or 0
day_totals["medium"] += summary["medium"] or 0
day_totals["low"] += summary["low"] or 0
day_totals["informational"] += summary["informational"] or 0
day_totals["muted"] += summary["muted"] or 0
day_scan_ids.append(summary["scan_id"])
break # Found the latest data for this provider
result.append(
{"date": current_date, "scan_ids": day_scan_ids, **day_totals}
)
current_date += timedelta(days=1)
serializer = self.get_serializer(result, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def _generate_zero_result(self, date_from, date_to):
"""Generate a list of zero-filled results for each date in range."""
result = []
current_date = date_from
zero_values = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
"informational": 0,
"muted": 0,
"scan_ids": [],
}
while current_date <= date_to:
result.append({"date": current_date, **zero_values})
current_date += timedelta(days=1)
return result
@extend_schema(
summary="Get ThreatScore snapshots",
description=(
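The fill-forward loop above is worth reading in isolation: for every day in the requested window it takes, per provider, the most recent summary dated on or before that day, then sums across providers. A self-contained sketch with toy data (two providers, one reporting two days late) shows the effect; only the critical counter is kept for brevity.

from collections import defaultdict
from datetime import date, timedelta

# Toy rows: provider "a" reports on day 1, provider "b" only on day 3.
summaries = [
    {"provider_id": "a", "date": date(2024, 1, 1), "critical": 10},
    {"provider_id": "b", "date": date(2024, 1, 3), "critical": 5},
]
by_provider = defaultdict(list)
for row in sorted(summaries, key=lambda r: r["date"], reverse=True):
    by_provider[row["provider_id"]].append(row)  # newest first, as in the view

day, end = date(2024, 1, 1), date(2024, 1, 3)
while day <= end:
    total = 0
    for rows in by_provider.values():
        for row in rows:  # newest first: the first match is the fill-forward value
            if row["date"] <= day:
                total += row["critical"]
                break
    print(day, total)  # 2024-01-01 10, 2024-01-02 10, 2024-01-03 15
    day += timedelta(days=1)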
+101
View File
@@ -1,14 +1,18 @@
from collections import defaultdict
from django.db.models import Sum
from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import (
ComplianceOverviewSummary,
ComplianceRequirementOverview,
DailySeveritySummary,
Resource,
ResourceFindingMapping,
ResourceScanSummary,
Scan,
ScanSummary,
StateChoices,
)
@@ -175,3 +179,100 @@ def backfill_compliance_summaries(tenant_id: str, scan_id: str):
)
return {"status": "backfilled", "inserted": len(summary_objects)}
def backfill_daily_severity_summaries(tenant_id: str, days: int = None):
"""
Backfill DailySeveritySummary from completed scans.
Groups by provider+date, keeps latest scan per day.
"""
from datetime import timedelta
from django.utils import timezone
created_count = 0
updated_count = 0
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
scan_filter = {
"tenant_id": tenant_id,
"state": StateChoices.COMPLETED,
"completed_at__isnull": False,
}
if days is not None:
cutoff_date = timezone.now() - timedelta(days=days)
scan_filter["completed_at__gte"] = cutoff_date
completed_scans = (
Scan.objects.filter(**scan_filter)
.order_by("provider_id", "-completed_at")
.values("id", "provider_id", "completed_at")
)
if not completed_scans:
return {"status": "no scans to backfill"}
# Keep only latest scan per provider/day
latest_scans_by_day = {}
for scan in completed_scans:
key = (scan["provider_id"], scan["completed_at"].date())
if key not in latest_scans_by_day:
latest_scans_by_day[key] = scan
# Process each provider/day
for (provider_id, scan_date), scan in latest_scans_by_day.items():
scan_id = scan["id"]
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
severity_totals = (
ScanSummary.objects.filter(
tenant_id=tenant_id,
scan_id=scan_id,
)
.values("severity")
.annotate(total_fail=Sum("fail"), total_muted=Sum("muted"))
)
severity_data = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
"informational": 0,
"muted": 0,
}
for row in severity_totals:
severity = row["severity"]
if severity in severity_data:
severity_data[severity] = row["total_fail"] or 0
severity_data["muted"] += row["total_muted"] or 0
with rls_transaction(tenant_id):
_, created = DailySeveritySummary.objects.update_or_create(
tenant_id=tenant_id,
provider_id=provider_id,
date=scan_date,
defaults={
"scan_id": scan_id,
"critical": severity_data["critical"],
"high": severity_data["high"],
"medium": severity_data["medium"],
"low": severity_data["low"],
"informational": severity_data["informational"],
"muted": severity_data["muted"],
},
)
if created:
created_count += 1
else:
updated_count += 1
return {
"status": "backfilled",
"created": created_count,
"updated": updated_count,
"total_days": len(latest_scans_by_day),
}
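The "latest scan per provider/day" selection above leans on a common idiom: order newest-first, then keep only the first scan seen for each (provider, day) key. A toy sketch under the same pre-sorted assumption:

from datetime import datetime

# Already ordered by provider_id, -completed_at, as in the queryset above.
scans = [
    {"id": "s2", "provider_id": "p1", "completed_at": datetime(2024, 1, 1, 18, 0)},
    {"id": "s1", "provider_id": "p1", "completed_at": datetime(2024, 1, 1, 9, 0)},
]
latest_by_day = {}
for scan in scans:
    key = (scan["provider_id"], scan["completed_at"].date())
    if key not in latest_by_day:  # first hit wins, i.e. the newest scan
        latest_by_day[key] = scan
assert latest_by_day[("p1", datetime(2024, 1, 1).date())]["id"] == "s2"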
+70
View File
@@ -29,6 +29,7 @@ from api.models import (
AttackSurfaceOverview,
ComplianceOverviewSummary,
ComplianceRequirementOverview,
DailySeveritySummary,
Finding,
MuteRule,
Processor,
@@ -1348,3 +1349,72 @@ def aggregate_attack_surface(tenant_id: str, scan_id: str):
)
else:
logger.info(f"No attack surface overview records created for scan {scan_id}")
def aggregate_daily_severity(tenant_id: str, scan_id: str):
"""Aggregate scan severity counts into DailySeveritySummary (one record per provider/day)."""
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
scan = Scan.objects.filter(
tenant_id=tenant_id,
id=scan_id,
state=StateChoices.COMPLETED,
).first()
if not scan:
logger.warning(f"Scan {scan_id} not found or not completed")
return {"status": "scan is not completed"}
provider_id = scan.provider_id
scan_date = scan.completed_at.date()
severity_totals = (
ScanSummary.objects.filter(
tenant_id=tenant_id,
scan_id=scan_id,
)
.values("severity")
.annotate(total_fail=Sum("fail"), total_muted=Sum("muted"))
)
severity_data = {
"critical": 0,
"high": 0,
"medium": 0,
"low": 0,
"informational": 0,
"muted": 0,
}
for row in severity_totals:
severity = row["severity"]
if severity in severity_data:
severity_data[severity] = row["total_fail"] or 0
severity_data["muted"] += row["total_muted"] or 0
with rls_transaction(tenant_id):
summary, created = DailySeveritySummary.objects.update_or_create(
tenant_id=tenant_id,
provider_id=provider_id,
date=scan_date,
defaults={
"scan_id": scan_id,
"critical": severity_data["critical"],
"high": severity_data["high"],
"medium": severity_data["medium"],
"low": severity_data["low"],
"informational": severity_data["informational"],
"muted": severity_data["muted"],
},
)
action = "created" if created else "updated"
logger.info(
f"Daily severity summary {action} for provider {provider_id} on {scan_date}"
)
return {
"status": action,
"provider_id": str(provider_id),
"date": str(scan_date),
"severity_data": severity_data,
}
+20 -2
View File
@@ -10,6 +10,7 @@ from config.django.base import DJANGO_FINDINGS_BATCH_SIZE, DJANGO_TMP_OUTPUT_DIR
from django_celery_beat.models import PeriodicTask
from tasks.jobs.backfill import (
backfill_compliance_summaries,
backfill_daily_severity_summaries,
backfill_resource_scan_summaries,
)
from tasks.jobs.connection import (
@@ -38,6 +39,7 @@ from tasks.jobs.muting import mute_historical_findings
from tasks.jobs.report import generate_compliance_reports_job
from tasks.jobs.scan import (
aggregate_attack_surface,
aggregate_daily_severity,
aggregate_findings,
create_compliance_requirements,
perform_prowler_scan,
@@ -75,8 +77,11 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
)
chain(
perform_scan_summary_task.si(tenant_id=tenant_id, scan_id=scan_id),
generate_outputs_task.si(
scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
group(
aggregate_daily_severity_task.si(tenant_id=tenant_id, scan_id=scan_id),
generate_outputs_task.si(
scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
),
),
group(
# Use optimized task that generates both reports with shared queries
@@ -523,6 +528,12 @@ def backfill_compliance_summaries_task(tenant_id: str, scan_id: str):
return backfill_compliance_summaries(tenant_id=tenant_id, scan_id=scan_id)
@shared_task(name="backfill-daily-severity-summaries", queue="backfill")
def backfill_daily_severity_summaries_task(tenant_id: str, days: int = None):
"""Backfill DailySeveritySummary from historical scans. Use days param to limit scope."""
return backfill_daily_severity_summaries(tenant_id=tenant_id, days=days)
@shared_task(base=RLSTask, name="scan-compliance-overviews", queue="compliance")
@handle_provider_deletion
def create_compliance_requirements_task(tenant_id: str, scan_id: str):
@@ -556,6 +567,13 @@ def aggregate_attack_surface_task(tenant_id: str, scan_id: str):
return aggregate_attack_surface(tenant_id=tenant_id, scan_id=scan_id)
@shared_task(name="scan-daily-severity", queue="overview")
@handle_provider_deletion
def aggregate_daily_severity_task(tenant_id: str, scan_id: str):
"""Aggregate scan severity into DailySeveritySummary for findings_severity/timeseries endpoint."""
return aggregate_daily_severity(tenant_id=tenant_id, scan_id=scan_id)
@shared_task(base=RLSTask, name="lighthouse-connection-check")
@set_tenant
def check_lighthouse_connection_task(lighthouse_config_id: str, tenant_id: str = None):
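The workflow change above nests a group inside the post-scan chain, so daily severity aggregation and output generation run in parallel once the scan summary step finishes. A stripped-down sketch of that canvas shape, assuming a configured Celery app; the task names here are placeholders standing in for the real tasks.

from celery import chain, group, shared_task

@shared_task
def summarize(scan_id):          # stands in for perform_scan_summary_task
    ...

@shared_task
def aggregate_severity(scan_id): # stands in for aggregate_daily_severity_task
    ...

@shared_task
def generate_outputs(scan_id):   # stands in for generate_outputs_task
    ...

# summarize runs first; the two group members then execute in parallel.
workflow = chain(
    summarize.si(scan_id="..."),
    group(
        aggregate_severity.si(scan_id="..."),
        generate_outputs.si(scan_id="..."),
    ),
)
# workflow.apply_async()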
@@ -58,7 +58,7 @@ Before you begin, ensure you have:
### Authentication
Prowler supports multiple authentication methods for OCI. For detailed authentication setup, see the [OCI Authentication Guide](./authentication.mdx).
Prowler supports multiple authentication methods for OCI. For detailed authentication setup, see the [OCI Authentication Guide](./authentication).
**Note:** OCI Session Authentication and Config File Authentication both use the same `~/.oci/config` file. The difference is how the config file is generated - automatically via browser (session auth) or manually with API keys.
@@ -107,7 +107,7 @@ The easiest and most secure method is using OCI session authentication, which au
#### Alternative: Manual API Key Setup
If you prefer to manually generate API keys instead of using browser-based session authentication, see the detailed instructions in the [Authentication Guide](./authentication.mdx#config-file-authentication-manual-api-key-setup).
If you prefer to manually generate API keys instead of using browser-based session authentication, see the detailed instructions in the [Authentication Guide](./authentication#config-file-authentication-manual-api-key-setup).
**Note:** Both methods use the same `~/.oci/config` file - the difference is that manual setup uses static API keys while session authentication uses temporary session tokens.
@@ -19,12 +19,17 @@ class CheckRemediation(MinimalSerializerMixin, BaseModel):
default=None,
description="Terraform code snippet with best practices for remediation",
)
recommendation_text: str | None = Field(
default=None, description="Text description with best practices"
)
recommendation_url: str | None = Field(
nativeiac: str | None = Field(
default=None,
description="URL to external remediation documentation",
description="Native Infrastructure as Code code snippet with best practices for remediation",
)
other: str | None = Field(
default=None,
description="Other remediation code snippet with best practices for remediation, usually used for web interfaces or other tools",
)
recommendation: str | None = Field(
default=None,
description="Text description with general best recommended practices to avoid the issue",
)
@@ -33,9 +38,6 @@ class CheckMetadata(MinimalSerializerMixin, BaseModel):
model_config = ConfigDict(frozen=True)
check_id: str = Field(
description="Unique provider identifier for the security check (e.g., 's3_bucket_public_access')",
)
title: str = Field(
description="Human-readable title of the security check",
)
@@ -59,9 +61,9 @@ class CheckMetadata(MinimalSerializerMixin, BaseModel):
default=None,
description="Remediation guidance including CLI commands and recommendations",
)
related_url: str | None = Field(
default=None,
description="URL to additional documentation or references",
additional_urls: list[str] = Field(
default_factory=list,
description="List of additional URLs related to the check",
)
categories: list[str] = Field(
default_factory=list,
@@ -79,23 +81,23 @@ class CheckMetadata(MinimalSerializerMixin, BaseModel):
recommendation = remediation_data.get("recommendation", {})
remediation = CheckRemediation(
cli=code.get("cli"),
terraform=code.get("terraform"),
recommendation_text=recommendation.get("text"),
recommendation_url=recommendation.get("url"),
cli=code["cli"],
terraform=code["terraform"],
nativeiac=code["nativeiac"],
other=code["other"],
recommendation=recommendation["text"],
)
return cls(
check_id=data["checkid"],
title=data["checktitle"],
description=data["description"],
provider=data["provider"],
risk=data.get("risk"),
risk=data["risk"],
service=data["servicename"],
resource_type=data["resourcetype"],
remediation=remediation,
related_url=data.get("relatedurl"),
categories=data.get("categories", []),
additional_urls=data["additionalurls"],
categories=data["categories"],
)
@@ -116,35 +118,36 @@ class SimplifiedFinding(MinimalSerializerMixin, BaseModel):
severity: Literal["critical", "high", "medium", "low", "informational"] = Field(
description="Severity level of the finding",
)
check_metadata: CheckMetadata = Field(
description="Metadata about the security check that generated this finding",
check_id: str = Field(
description="ID of the security check that generated this finding",
)
status_extended: str = Field(
description="Extended status information providing additional context",
)
delta: Literal["new", "changed"] = Field(
delta: Literal["new", "changed"] | None = Field(
default=None,
description="Change status: 'new' (not seen before), 'changed' (modified since last scan), or None (unchanged)",
)
muted: bool = Field(
muted: bool | None = Field(
default=None,
description="Whether this finding has been muted/suppressed by the user",
)
muted_reason: str = Field(
muted_reason: str | None = Field(
default=None,
description="Reason provided when muting this finding (3-500 chars if muted)",
description="Reason provided when muting this finding",
)
@classmethod
def from_api_response(cls, data: dict) -> "SimplifiedFinding":
"""Transform JSON:API finding response to simplified format."""
attributes = data["attributes"]
check_metadata = attributes["check_metadata"]
return cls(
id=data["id"],
uid=attributes["uid"],
status=attributes["status"],
severity=attributes["severity"],
check_metadata=CheckMetadata.from_api_response(check_metadata),
check_id=attributes["check_metadata"]["checkid"],
status_extended=attributes["status_extended"],
delta=attributes["delta"],
muted=attributes["muted"],
@@ -179,6 +182,9 @@ class DetailedFinding(SimplifiedFinding):
default_factory=list,
description="List of UUIDs for cloud resources associated with this finding",
)
check_metadata: CheckMetadata = Field(
description="Metadata about the security check that generated this finding",
)
@classmethod
def from_api_response(cls, data: dict) -> "DetailedFinding":
@@ -204,6 +210,7 @@ class DetailedFinding(SimplifiedFinding):
uid=attributes["uid"],
status=attributes["status"],
severity=attributes["severity"],
check_id=check_metadata["checkid"],
check_metadata=CheckMetadata.from_api_response(check_metadata),
status_extended=attributes.get("status_extended"),
delta=attributes.get("delta"),
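With the stricter from_api_response above, required keys such as risk, nativeiac, and additionalurls are indexed directly, so a payload missing any of them now raises KeyError instead of silently defaulting. An illustrative payload that satisfies the new contract (all values invented, URL truncated as a placeholder):

payload = {
    "checkid": "s3_bucket_public_access",
    "checktitle": "Ensure S3 buckets block public access",
    "description": "Checks that public access is blocked at the bucket level.",
    "provider": "aws",
    "risk": "Publicly readable buckets can expose sensitive data.",
    "servicename": "s3",
    "resourcetype": "AwsS3Bucket",
    "remediation": {
        "code": {
            "cli": "aws s3api put-public-access-block ...",
            "terraform": "...",
            "nativeiac": "...",
            "other": "...",
        },
        "recommendation": {"text": "Block public access unless explicitly required."},
    },
    "additionalurls": ["https://docs.aws.amazon.com/..."],
    "categories": ["internet-exposed"],
}
metadata = CheckMetadata.from_api_response(payload)  # KeyError if a required key is absent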
@@ -19,9 +19,9 @@ class FindingsTools(BaseTool):
"""Tools for security findings operations.
Provides tools for:
- Searching and filtering security findings
- Getting detailed finding information
- Viewing findings overview/statistics
- search_security_findings: Fast and lightweight searching across findings
- get_finding_details: Get complete details for a specific finding
- get_findings_overview: Get aggregate statistics and trends across all findings
"""
async def search_security_findings(
@@ -90,27 +90,27 @@ class FindingsTools(BaseTool):
) -> dict[str, Any]:
"""Search and filter security findings across all cloud providers with rich filtering capabilities.
This is the primary tool for browsing and filtering security findings. Returns lightweight findings
optimized for searching across large result sets. For detailed information about a specific finding,
use get_finding_details.
IMPORTANT: This tool returns LIGHTWEIGHT findings. Use this for fast searching and filtering across many findings.
For complete details use prowler_app_get_finding_details on specific findings.
Default behavior:
- Returns latest findings from most recent scans (no date parameters needed)
- Filters to FAIL status only (security issues found)
- Returns 100 results per page
- Returns 50 results per page
Date filtering:
- Without dates: queries findings from the most recent completed scan across all providers (most efficient). This returns the latest snapshot of findings, not a time-based query.
- With dates: queries historical findings (2-day maximum range)
- Without dates: queries findings from the most recent completed scan across all providers (most efficient)
- With dates: queries historical findings (2-day maximum range between date_from and date_to)
Each finding includes:
- Core identification: id, uid, check_id
- Security context: status, severity, check_metadata (title, description, remediation)
- State tracking: delta (new/changed), muted status
- Extended details: status_extended for additional context
- Core identification: id (UUID for get_finding_details), uid, check_id
- Security context: status (FAIL/PASS/MANUAL), severity (critical/high/medium/low/informational)
- State tracking: delta (new/changed/unchanged), muted (boolean), muted_reason
- Extended details: status_extended with additional context
Returns:
Paginated list of simplified findings with total count and pagination metadata
Workflow:
1. Use this tool to search and filter findings by severity, status, provider, service, region, etc.
2. Use prowler_app_get_finding_details with the finding 'id' to get complete information about the finding
"""
# Validate page_size parameter
self.api_client.validate_page_size(page_size)
@@ -185,21 +185,39 @@ class FindingsTools(BaseTool):
) -> dict[str, Any]:
"""Retrieve comprehensive details about a specific security finding by its ID.
This tool provides MORE detailed information than search_security_findings. Use this when you need
to deeply analyze a specific finding or understand its complete context and history.
IMPORTANT: This tool returns COMPLETE finding details.
Use this after locating a specific finding via prowler_app_search_security_findings.
Additional information compared to search_security_findings:
- Temporal metadata: when the finding was first seen, inserted, and last updated
- Scan relationship: ID of the scan that generated this finding
- Resource relationships: IDs of all cloud resources associated with this finding
This tool provides ALL information that prowler_app_search_security_findings returns PLUS:
1. Check Metadata (information about the check script that generated the finding):
- title: Human-readable phrase used to summarize the check
- description: Detailed explanation of what the check validates and why it is important
- risk: What could happen if this check fails
- remediation: Complete remediation guidance including step-by-step instructions and code snippets with best practices to fix the issue:
* cli: Command-line commands to fix the issue
* terraform: Terraform code snippets with best practices
* nativeiac: Provider-native Infrastructure as Code (IaC) snippets with best practices to fix the issue
* other: Other remediation code snippets with best practices, usually used for web interfaces or other tools
* recommendation: Text description with general best recommended practices to avoid the issue
- provider: Cloud provider (aws/azure/gcp/etc)
- service: Service name (s3/ec2/keyvault/etc)
- resource_type: Resource type being evaluated
- categories: Security categories this check belongs to
- additional_urls: List of additional URLs related to the check
2. Temporal Metadata:
- inserted_at: When this finding was first inserted into database
- updated_at: When this finding was last updated
- first_seen_at: When this finding was first detected across all scans
3. Relationships:
- scan_id: UUID of the scan that generated this finding
- resource_ids: List of UUIDs for cloud resources associated with this finding
Workflow:
1. Use search_security_findings to browse and filter across many findings
2. Use get_finding_details to drill down into specific findings of interest
Returns:
dict containing detailed finding with comprehensive security metadata, temporal information,
and relationships to scans and resources
1. Use prowler_app_search_security_findings to browse and filter findings
2. Use this tool with the finding 'id' to get remediation guidance and complete context
"""
params = {
# Return comprehensive fields including temporal metadata
@@ -225,26 +243,31 @@ class FindingsTools(BaseTool):
description="Filter statistics by cloud provider. Multiple values allowed. If empty, all providers are returned. For valid values, please refer to Prowler Hub/Prowler Documentation that you can also find in form of tools in this MCP Server.",
),
) -> dict[str, Any]:
"""Get high-level statistics about security findings formatted as a human-readable markdown report.
"""Get aggregate statistics and trends about security findings as a markdown report.
Use this tool to get a quick overview of your security posture without retrieving individual findings.
Perfect for understanding trends, identifying areas of concern, and tracking improvements over time.
This tool provides a HIGH-LEVEL OVERVIEW without retrieving individual findings. Use this when you
need to understand the overall security posture, trends, or remediation progress across all findings.
The report includes:
- Summary statistics: total findings, fail/pass/muted counts with percentages
- Delta analysis: breakdown of new vs changed findings
- Trending information: how findings are evolving over time
The markdown report includes:
Output format: Markdown-formatted report ready to present to users or include in documentation.
1. Summary Statistics:
- Total number of findings
- Failed checks (security issues) with percentage
- Passed checks (no issues) with percentage
- Muted findings (user-suppressed) with percentage
Use cases:
- Quick security posture assessment
- Tracking remediation progress over time
- Identifying which providers have most issues
- Understanding finding trends (improving or degrading)
2. Delta Analysis (Change Tracking):
- New findings: never seen before in previous scans
* Broken down by: new failures, new passes, new muted
- Changed findings: status changed since last scan
* Broken down by: changed to fail, changed to pass, changed to muted
- Unchanged findings: same status as previous scan
Returns:
Dictionary with 'report' key containing markdown-formatted summary statistics
This helps answer questions like:
- "What's my overall security posture?"
- "How many critical security issues do I have?"
- "Are we improving or getting worse over time?"
- "How many new security issues appeared since last scan?"
"""
params = {
# Return only LLM-relevant aggregate statistics
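Taken together, the rewritten docstrings describe a deliberate funnel: lightweight search, then per-finding detail, then an aggregate markdown report. A rough sketch of that workflow, assuming an initialized FindingsTools instance; the exact parameter lists are not shown in this diff, so the call signatures and response shape below are assumptions.

import asyncio

async def triage(tools):  # `tools`: an initialized FindingsTools instance (assumed)
    # 1. Fast, lightweight search (defaults: latest scans, FAIL status).
    page = await tools.search_security_findings()
    # 2. Complete details, including remediation metadata, for one finding 'id'.
    finding_id = page["data"][0]["id"]  # JSON:API-like response shape assumed
    detail = await tools.get_finding_details(finding_id)
    # 3. Aggregate posture report (markdown) without individual findings.
    overview = await tools.get_findings_overview()
    return detail, overview

# asyncio.run(triage(tools))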
+8 -1
View File
@@ -2923,6 +2923,8 @@ python-versions = "*"
groups = ["dev"]
files = [
{file = "jsonpath-ng-1.7.0.tar.gz", hash = "sha256:f6f5f7fd4e5ff79c785f1573b394043b39849fb2bb47bcead935d12b00beab3c"},
{file = "jsonpath_ng-1.7.0-py2-none-any.whl", hash = "sha256:898c93fc173f0c336784a3fa63d7434297544b7198124a68f9a3ef9597b0ae6e"},
{file = "jsonpath_ng-1.7.0-py3-none-any.whl", hash = "sha256:f3d7f9e848cba1b6da28c55b1c26ff915dc9e0b1ba7e752a53d6da8d5cbd00b6"},
]
[package.dependencies]
@@ -5513,6 +5515,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"},
@@ -5521,6 +5524,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"},
@@ -5529,6 +5533,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"},
@@ -5537,6 +5542,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"},
@@ -5545,6 +5551,7 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"},
{file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"},
@@ -6453,4 +6460,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">3.9.1,<3.13"
content-hash = "433468987cb3c4499d094d90e9f8cc9062a25ce115fde991a4e1b39edbfb7815"
content-hash = "1559a8799915bf0372eef07396e1dc40802911ef07ae92997cd260d9fe596ba3"
+16 -1
View File
@@ -11,8 +11,14 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `compute_instance_preemptible_vm_disabled` check for GCP provider [(#9342)](https://github.com/prowler-cloud/prowler/pull/9342)
- `compute_instance_automatic_restart_enabled` check for GCP provider [(#9271)](https://github.com/prowler-cloud/prowler/pull/9271)
- `compute_instance_deletion_protection_enabled` check for GCP provider [(#9358)](https://github.com/prowler-cloud/prowler/pull/9358)
- Update SOC2 - Azure with Processing Integrity requirements [(#9463)](https://github.com/prowler-cloud/prowler/pull/9463)
- Update SOC2 - GCP with Processing Integrity requirements [(#9464)](https://github.com/prowler-cloud/prowler/pull/9464)
- Update SOC2 - AWS with Processing Integrity requirements [(#9462)](https://github.com/prowler-cloud/prowler/pull/9462)
- RBI Cyber Security Framework compliance for Azure provider [(#8822)](https://github.com/prowler-cloud/prowler/pull/8822)
### Changed
- Update AWS Macie service metadata to new format [(#9265)](https://github.com/prowler-cloud/prowler/pull/9265)
- Update AWS Lightsail service metadata to new format [(#9264)](https://github.com/prowler-cloud/prowler/pull/9264)
- Update AWS GuardDuty service metadata to new format [(#9259)](https://github.com/prowler-cloud/prowler/pull/9259)
- Update AWS Network Firewall service metadata to new format [(#9382)](https://github.com/prowler-cloud/prowler/pull/9382)
@@ -22,10 +28,19 @@ All notable changes to the **Prowler SDK** are documented in this file.
---
## [v5.14.2] (Prowler UNRELEASED)
## [v5.14.3] (Prowler UNRELEASED)
### Fixed
- Fix duplicate requirement IDs in ISO 27001:2013 AWS compliance framework by adding unique letter suffixes
- Removed incorrect threat-detection category from checks metadata [(#9489)](https://github.com/prowler-cloud/prowler/pull/9489)
---
## [v5.14.2] (Prowler 5.14.2)
### Fixed
- Custom check folder metadata validation [(#9335)](https://github.com/prowler-cloud/prowler/pull/9335)
- Pin `alibabacloud-gateway-oss-util` to version 0.0.3 to address missing dependency [(#9487)](https://github.com/prowler-cloud/prowler/pull/9487)
---
File diff suppressed because it is too large
+100
View File
@@ -547,6 +547,106 @@
"cloudwatch_log_group_retention_policy_specific_days_enabled",
"kinesis_stream_data_retention_period"
]
},
{
"Id": "pi_1_2",
"Name": "PI1.2 System inputs are measured and recorded completely, accurately, and timely to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements policies and procedures over system inputs, including controls over completeness and accuracy, to result in products, services, and reporting to meet the entity's objectives. This includes defining accuracy targets, monitoring input quality, and creating detailed records of each input event.",
"Attributes": [
{
"ItemId": "pi_1_2",
"Section": "PI1.0 - Processing Integrity",
"Service": "aws",
"Type": "automated"
}
],
"Checks": [
"apigateway_restapi_logging_enabled",
"apigatewayv2_api_access_logging_enabled",
"elbv2_logging_enabled",
"elb_logging_enabled",
"wafv2_webacl_logging_enabled",
"waf_global_webacl_logging_enabled",
"cloudtrail_s3_dataevents_write_enabled",
"cloudfront_distributions_logging_enabled"
]
},
{
"Id": "pi_1_3",
"Name": "PI1.3 Data is processed completely, accurately, and timely as authorized to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure data is processed completely, accurately, and timely. This includes defining processing specifications, identifying processing activities, detecting and correcting errors throughout processing, recording processing activities with accurate logs, and ensuring completeness and timeliness of processing.",
"Attributes": [
{
"ItemId": "pi_1_3",
"Section": "PI1.0 - Processing Integrity",
"Service": "aws",
"Type": "automated"
}
],
"Checks": [
"cloudtrail_multi_region_enabled",
"cloudtrail_log_file_validation_enabled",
"cloudtrail_cloudwatch_logging_enabled",
"cloudwatch_log_metric_filter_unauthorized_api_calls",
"cloudwatch_log_metric_filter_authentication_failures",
"cloudwatch_log_metric_filter_policy_changes",
"cloudwatch_log_metric_filter_root_usage",
"config_recorder_all_regions_enabled",
"rds_instance_integration_cloudwatch_logs",
"rds_cluster_integration_cloudwatch_logs",
"glue_etl_jobs_logging_enabled",
"stepfunctions_statemachine_logging_enabled"
]
},
{
"Id": "pi_1_4",
"Name": "PI1.4 System outputs are complete, accurate, distributed only to intended parties, and retained to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure system outputs are delivered to authorized recipients in the correct format and protected against unauthorized access, modification, theft, destruction, or corruption. This includes output encryption, access controls, and audit trails for output delivery.",
"Attributes": [
{
"ItemId": "pi_1_4",
"Section": "PI1.0 - Processing Integrity",
"Service": "aws",
"Type": "automated"
}
],
"Checks": [
"s3_bucket_default_encryption",
"s3_bucket_kms_encryption",
"cloudwatch_log_group_kms_encryption_enabled",
"sns_topics_kms_encryption_at_rest_enabled",
"kinesis_stream_encrypted_at_rest",
"cloudfront_distributions_field_level_encryption_enabled",
"cloudwatch_log_group_not_publicly_accessible",
"cloudwatch_cross_account_sharing_disabled",
"glue_etl_jobs_cloudwatch_logs_encryption_enabled",
"glue_etl_jobs_amazon_s3_encryption_enabled"
]
},
{
"Id": "pi_1_5",
"Name": "PI1.5 Stored data is maintained complete, accurate, and protected from unauthorized modification to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to protect stored inputs, items in processing, and outputs from theft, destruction, corruption, or deterioration. This includes data encryption at rest, key management, backup and recovery procedures, access controls, and data integrity validation.",
"Attributes": [
{
"ItemId": "pi_1_5",
"Section": "PI1.0 - Processing Integrity",
"Service": "aws",
"Type": "automated"
}
],
"Checks": [
"s3_bucket_object_versioning",
"s3_bucket_object_lock",
"rds_instance_storage_encrypted",
"rds_cluster_storage_encrypted",
"dynamodb_tables_kms_cmk_encryption_enabled",
"ec2_ebs_volume_encryption",
"backup_plans_exist",
"backup_recovery_point_encrypted",
"backup_vaults_encrypted",
"kms_cmk_rotation_enabled"
]
}
]
}
@@ -0,0 +1,248 @@
{
"Framework": "RBI-Cyber-Security-Framework",
"Name": "Reserve Bank of India (RBI) Cyber Security Framework",
"Version": "",
"Provider": "Azure",
"Description": "The Reserve Bank had prescribed a set of baseline cyber security controls for primary (Urban) cooperative banks (UCBs) in October 2018. On further examination, it has been decided to prescribe a comprehensive cyber security framework for the UCBs, as a graded approach, based on their digital depth and interconnectedness with the payment systems landscape, digital products offered by them and assessment of cyber security risk. The framework would mandate implementation of progressively stronger security measures based on the nature, variety and scale of digital product offerings of banks.",
"Requirements": [
{
"Id": "annex_i_1_1",
"Name": "Annex I (1.1)",
"Description": "UCBs should maintain an up-to-date business IT Asset Inventory Register containing the following fields, as a minimum: a) Details of the IT Asset (viz., hardware/software/network devices, key personnel, services, etc.), b. Details of systems where customer data are stored, c. Associated business applications, if any, d. Criticality of the IT asset (For example, High/Medium/Low).",
"Attributes": [
{
"ItemId": "annex_i_1_1",
"Service": "vm"
}
],
"Checks": [
"vm_ensure_using_approved_images",
"vm_ensure_using_managed_disks",
"vm_trusted_launch_enabled",
"aks_cluster_rbac_enabled",
"aks_clusters_created_with_private_nodes",
"appinsights_ensure_is_configured",
"containerregistry_admin_user_disabled"
]
},
{
"Id": "annex_i_1_3",
"Name": "Annex I (1.3)",
"Description": "Appropriately manage and provide protection within and outside UCB/network, keeping in mind how the data/information is stored, transmitted, processed, accessed and put to use within/outside the UCB's network, and level of risk they are exposed to depending on the sensitivity of the data/information.",
"Attributes": [
{
"ItemId": "annex_i_1_3",
"Service": "azure"
}
],
"Checks": [
"keyvault_key_rotation_enabled",
"keyvault_access_only_through_private_endpoints",
"keyvault_private_endpoints",
"keyvault_rbac_enabled",
"app_function_not_publicly_accessible",
"app_ensure_http_is_redirected_to_https",
"app_minimum_tls_version_12",
"storage_blob_public_access_level_is_disabled",
"storage_secure_transfer_required_is_enabled",
"storage_ensure_encryption_with_customer_managed_keys",
"storage_ensure_minimum_tls_version_12",
"storage_default_network_access_rule_is_denied",
"storage_ensure_private_endpoints_in_storage_accounts",
"network_ssh_internet_access_restricted",
"sqlserver_unrestricted_inbound_access",
"sqlserver_tde_encryption_enabled",
"sqlserver_tde_encrypted_with_cmk",
"cosmosdb_account_use_private_endpoints",
"cosmosdb_account_firewall_use_selected_networks",
"mysql_flexible_server_ssl_connection_enabled",
"mysql_flexible_server_minimum_tls_version_12",
"postgresql_flexible_server_enforce_ssl_enabled",
"aks_clusters_public_access_disabled",
"containerregistry_not_publicly_accessible",
"containerregistry_uses_private_link",
"aisearch_service_not_publicly_accessible"
]
},
{
"Id": "annex_i_5_1",
"Name": "Annex I (5.1)",
"Description": "The firewall configurations should be set to the highest security level and evaluation of critical device (such as firewall, network switches, security devices, etc.) configurations should be done periodically.",
"Attributes": [
{
"ItemId": "annex_i_5_1",
"Service": "network"
}
],
"Checks": [
"network_rdp_internet_access_restricted",
"network_http_internet_access_restricted",
"network_udp_internet_access_restricted",
"network_ssh_internet_access_restricted",
"network_flow_log_captured_sent",
"network_flow_log_more_than_90_days",
"network_watcher_enabled",
"network_bastion_host_exists",
"aks_network_policy_enabled",
"storage_default_network_access_rule_is_denied"
]
},
{
"Id": "annex_i_6",
"Name": "Annex I (6)",
"Description": "Put in place systems and processes to identify, track, manage and monitor the status of patches to servers, operating system and application software running at the systems used by the UCB officials (end-users). Implement and update antivirus protection for all servers and applicable end points preferably through a centralised system.",
"Attributes": [
{
"ItemId": "annex_i_6",
"Service": "defender"
}
],
"Checks": [
"defender_ensure_system_updates_are_applied",
"defender_assessments_vm_endpoint_protection_installed",
"defender_ensure_defender_for_server_is_on",
"defender_ensure_defender_for_app_services_is_on",
"defender_ensure_defender_for_sql_servers_is_on",
"defender_ensure_defender_for_azure_sql_databases_is_on",
"defender_ensure_defender_for_storage_is_on",
"defender_ensure_defender_for_containers_is_on",
"defender_ensure_defender_for_keyvault_is_on",
"defender_ensure_defender_for_arm_is_on",
"defender_ensure_defender_for_dns_is_on",
"defender_ensure_defender_for_databases_is_on",
"defender_ensure_defender_for_cosmosdb_is_on",
"defender_container_images_scan_enabled",
"defender_container_images_resolved_vulnerabilities",
"defender_auto_provisioning_vulnerabilty_assessments_machines_on",
"vm_backup_enabled",
"app_ensure_java_version_is_latest",
"app_ensure_php_version_is_latest",
"app_ensure_python_version_is_latest"
]
},
{
"Id": "annex_i_7_1",
"Name": "Annex I (7.1)",
"Description": "Disallow administrative rights on end-user workstations/PCs/laptops and provide access rights on a 'need to know' and 'need to do' basis.",
"Attributes": [
{
"ItemId": "annex_i_7_1",
"Service": "iam"
}
],
"Checks": [
"iam_role_user_access_admin_restricted",
"iam_subscription_roles_owner_custom_not_created",
"iam_custom_role_has_permissions_to_administer_resource_locks",
"entra_global_admin_in_less_than_five_users",
"entra_policy_ensure_default_user_cannot_create_apps",
"entra_policy_ensure_default_user_cannot_create_tenants",
"entra_policy_default_users_cannot_create_security_groups",
"entra_policy_guest_invite_only_for_admin_roles",
"entra_policy_guest_users_access_restrictions",
"app_function_identity_without_admin_privileges"
]
},
{
"Id": "annex_i_7_2",
"Name": "Annex I (7.2)",
"Description": "Passwords should be set as complex and lengthy and users should not use same passwords for all the applications/systems/devices.",
"Attributes": [
{
"ItemId": "annex_i_7_2",
"Service": "entra"
}
],
"Checks": [
"entra_non_privileged_user_has_mfa",
"entra_privileged_user_has_mfa",
"entra_policy_user_consent_for_verified_apps",
"entra_policy_restricts_user_consent_for_apps",
"entra_user_with_vm_access_has_mfa",
"entra_security_defaults_enabled",
"entra_conditional_access_policy_require_mfa_for_management_api",
"entra_trusted_named_locations_exists",
"sqlserver_azuread_administrator_enabled",
"postgresql_flexible_server_entra_id_authentication_enabled",
"cosmosdb_account_use_aad_and_rbac"
]
},
{
"Id": "annex_i_7_3",
"Name": "Annex I (7.3)",
"Description": "Remote Desktop Protocol (RDP) which allows others to access the computer remotely over a network or over the internet should be always disabled and should be enabled only with the approval of the authorised officer of the UCB. Logs for such remote access shall be enabled and monitored for suspicious activities.",
"Attributes": [
{
"ItemId": "annex_i_7_3",
"Service": "network"
}
],
"Checks": [
"network_rdp_internet_access_restricted",
"vm_jit_access_enabled",
"network_bastion_host_exists",
"vm_linux_enforce_ssh_authentication"
]
},
{
"Id": "annex_i_7_4",
"Name": "Annex I (7.4)",
"Description": "Implement appropriate (e.g. centralised) systems and controls to allow, manage, log and monitor privileged/super user/administrative access to critical systems (servers/databases, applications, network devices etc.)",
"Attributes": [
{
"ItemId": "annex_i_7_4",
"Service": "monitor"
}
],
"Checks": [
"monitor_alert_create_update_nsg",
"monitor_alert_delete_nsg",
"monitor_diagnostic_setting_with_appropriate_categories",
"monitor_diagnostic_settings_exists",
"monitor_alert_create_policy_assignment",
"monitor_alert_delete_policy_assignment",
"monitor_alert_create_update_security_solution",
"monitor_alert_delete_security_solution",
"monitor_alert_create_update_sqlserver_fr",
"monitor_alert_delete_sqlserver_fr",
"monitor_alert_create_update_public_ip_address_rule",
"monitor_alert_delete_public_ip_address_rule",
"monitor_alert_service_health_exists",
"monitor_storage_account_with_activity_logs_cmk_encrypted",
"monitor_storage_account_with_activity_logs_is_private",
"keyvault_logging_enabled",
"sqlserver_auditing_enabled",
"sqlserver_auditing_retention_90_days",
"app_http_logs_enabled",
"app_function_application_insights_enabled",
"defender_additional_email_configured_with_a_security_contact",
"defender_ensure_notify_alerts_severity_is_high",
"defender_ensure_notify_emails_to_owners",
"defender_ensure_mcas_is_enabled",
"defender_ensure_wdatp_is_enabled"
]
},
{
"Id": "annex_i_12",
"Name": "Annex I (12)",
"Description": "Take periodic back up of the important data and store this data 'off line' (i.e., transferring important files to a storage device that can be detached from a computer/system after copying all the files).",
"Attributes": [
{
"ItemId": "annex_i_12",
"Service": "azure"
}
],
"Checks": [
"vm_backup_enabled",
"vm_sufficient_daily_backup_retention_period",
"storage_ensure_file_shares_soft_delete_is_enabled",
"storage_blob_versioning_is_enabled",
"storage_ensure_soft_delete_is_enabled",
"storage_geo_redundant_enabled",
"keyvault_recoverable",
"sqlserver_vulnerability_assessment_enabled",
"sqlserver_va_periodic_recurring_scans_enabled"
]
}
]
}
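Each RBI requirement above tags a single `Service` in its `Attributes`. A small sketch (sample data invented, mirroring two entries above) of grouping requirement IDs by that attribute, which is how a per-service view of the framework can be derived:

```ts
// Assumes the requirement shape shown in the RBI file above.
interface RbiRequirement {
  Id: string;
  Attributes: Array<{ ItemId: string; Service: string }>;
}

function groupByService(
  requirements: RbiRequirement[],
): Record<string, string[]> {
  const byService: Record<string, string[]> = {};
  for (const req of requirements) {
    for (const attr of req.Attributes) {
      (byService[attr.Service] ??= []).push(req.Id);
    }
  }
  return byService;
}

// Invented sample mirroring two of the entries above:
const sample: RbiRequirement[] = [
  { Id: "annex_i_1_1", Attributes: [{ ItemId: "annex_i_1_1", Service: "vm" }] },
  { Id: "annex_i_5_1", Attributes: [{ ItemId: "annex_i_5_1", Service: "network" }] },
];
console.log(groupByService(sample)); // { vm: ["annex_i_1_1"], network: ["annex_i_5_1"] }
```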
+87 -1
View File
@@ -619,6 +619,92 @@
"sqlserver_auditing_retention_90_days",
"storage_ensure_soft_delete_is_enabled"
]
},
{
"Id": "pi_1_2",
"Name": "PI1.2 System inputs are measured and recorded completely, accurately, and timely to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements policies and procedures over system inputs, including controls over completeness and accuracy, to result in products, services, and reporting to meet the entity's objectives. This includes defining accuracy targets, monitoring input quality, and creating detailed records of each input event.",
"Attributes": [
{
"ItemId": "pi_1_2",
"Section": "PI1.0 - Processing Integrity",
"Service": "azure",
"Type": "automated"
}
],
"Checks": [
"app_http_logs_enabled",
"network_flow_log_captured_sent",
"keyvault_logging_enabled",
"monitor_diagnostic_settings_exists",
"sqlserver_auditing_enabled"
]
},
{
"Id": "pi_1_3",
"Name": "PI1.3 Data is processed completely, accurately, and timely as authorized to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure data is processed completely, accurately, and timely. This includes defining processing specifications, identifying processing activities, detecting and correcting errors throughout processing, recording processing activities with accurate logs, and ensuring completeness and timeliness of processing.",
"Attributes": [
{
"ItemId": "pi_1_3",
"Section": "PI1.0 - Processing Integrity",
"Service": "azure",
"Type": "automated"
}
],
"Checks": [
"monitor_diagnostic_setting_with_appropriate_categories",
"monitor_diagnostic_settings_exists",
"defender_auto_provisioning_log_analytics_agent_vms_on",
"mysql_flexible_server_audit_log_enabled",
"postgresql_flexible_server_log_checkpoints_on",
"postgresql_flexible_server_log_connections_on",
"postgresql_flexible_server_log_disconnections_on",
"network_flow_log_more_than_90_days"
]
},
{
"Id": "pi_1_4",
"Name": "PI1.4 System outputs are complete, accurate, distributed only to intended parties, and retained to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure system outputs are delivered to authorized recipients in the correct format and protected against unauthorized access, modification, theft, destruction, or corruption. This includes output encryption, access controls, and audit trails for output delivery.",
"Attributes": [
{
"ItemId": "pi_1_4",
"Section": "PI1.0 - Processing Integrity",
"Service": "azure",
"Type": "automated"
}
],
"Checks": [
"storage_ensure_encryption_with_customer_managed_keys",
"storage_infrastructure_encryption_is_enabled",
"monitor_storage_account_with_activity_logs_cmk_encrypted",
"monitor_storage_account_with_activity_logs_is_private",
"sqlserver_tde_encryption_enabled",
"sqlserver_tde_encrypted_with_cmk"
]
},
{
"Id": "pi_1_5",
"Name": "PI1.5 Stored data is maintained complete, accurate, and protected from unauthorized modification to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to protect stored inputs, items in processing, and outputs from theft, destruction, corruption, or deterioration. This includes data encryption at rest, key management, backup and recovery procedures, access controls, and data integrity validation.",
"Attributes": [
{
"ItemId": "pi_1_5",
"Section": "PI1.0 - Processing Integrity",
"Service": "azure",
"Type": "automated"
}
],
"Checks": [
"storage_ensure_encryption_with_customer_managed_keys",
"storage_infrastructure_encryption_is_enabled",
"storage_ensure_soft_delete_is_enabled",
"vm_ensure_attached_disks_encrypted_with_cmk",
"vm_ensure_unattached_disks_encrypted_with_cmk",
"keyvault_key_rotation_enabled",
"keyvault_recoverable"
]
}
]
}
}
+82 -1
View File
@@ -492,6 +492,87 @@
"Checks": [
"cloudstorage_bucket_log_retention_policy_lock"
]
},
{
"Id": "pi_1_2",
"Name": "PI1.2 System inputs are measured and recorded completely, accurately, and timely to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements policies and procedures over system inputs, including controls over completeness and accuracy, to result in products, services, and reporting to meet the entity's objectives. This includes defining accuracy targets, monitoring input quality, and creating detailed records of each input event.",
"Attributes": [
{
"ItemId": "pi_1_2",
"Section": "PI1.0 - Processing Integrity",
"Service": "gcp",
"Type": "automated"
}
],
"Checks": [
"compute_loadbalancer_logging_enabled",
"compute_subnet_flow_logs_enabled",
"logging_sink_created",
"iam_audit_logs_enabled"
]
},
{
"Id": "pi_1_3",
"Name": "PI1.3 Data is processed completely, accurately, and timely as authorized to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure data is processed completely, accurately, and timely. This includes defining processing specifications, identifying processing activities, detecting and correcting errors throughout processing, recording processing activities with accurate logs, and ensuring completeness and timeliness of processing.",
"Attributes": [
{
"ItemId": "pi_1_3",
"Section": "PI1.0 - Processing Integrity",
"Service": "gcp",
"Type": "automated"
}
],
"Checks": [
"logging_log_metric_filter_and_alert_for_audit_configuration_changes_enabled",
"logging_log_metric_filter_and_alert_for_project_ownership_changes_enabled",
"logging_log_metric_filter_and_alert_for_sql_instance_configuration_changes_enabled",
"cloudsql_instance_postgres_log_connections_flag",
"cloudsql_instance_postgres_log_disconnections_flag",
"cloudsql_instance_postgres_log_statement_flag",
"iam_audit_logs_enabled"
]
},
{
"Id": "pi_1_4",
"Name": "PI1.4 System outputs are complete, accurate, distributed only to intended parties, and retained to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to ensure system outputs are delivered to authorized recipients in the correct format and protected against unauthorized access, modification, theft, destruction, or corruption. This includes output encryption, access controls, and audit trails for output delivery.",
"Attributes": [
{
"ItemId": "pi_1_4",
"Section": "PI1.0 - Processing Integrity",
"Service": "gcp",
"Type": "automated"
}
],
"Checks": [
"cloudstorage_bucket_uniform_bucket_level_access",
"bigquery_dataset_cmk_encryption",
"bigquery_table_cmk_encryption",
"compute_instance_confidential_computing_enabled",
"pubsub_topic_encryption_with_cmk"
]
},
{
"Id": "pi_1_5",
"Name": "PI1.5 Stored data is maintained complete, accurate, and protected from unauthorized modification to meet the entity's processing integrity commitments and system requirements",
"Description": "The entity implements controls to protect stored inputs, items in processing, and outputs from theft, destruction, corruption, or deterioration. This includes data encryption at rest, key management, backup and recovery procedures, access controls, and data integrity validation.",
"Attributes": [
{
"ItemId": "pi_1_5",
"Section": "PI1.0 - Processing Integrity",
"Service": "gcp",
"Type": "automated"
}
],
"Checks": [
"cloudstorage_bucket_log_retention_policy_lock",
"cloudsql_instance_automated_backups",
"compute_instance_encryption_with_csek_enabled",
"kms_key_rotation_enabled",
"dataproc_encrypted_with_cmks_disabled"
]
}
]
}
}
@@ -29,9 +29,7 @@
"Url": "https://hub.prowler.com/check/apigateway_restapi_waf_acl_attached"
}
},
"Categories": [
"threat-detection"
],
"Categories": [],
"DependsOn": [],
"RelatedTo": [],
"Notes": "",
@@ -33,7 +33,7 @@
}
},
"Categories": [
"threat-detection"
"forensics-ready"
],
"DependsOn": [],
"RelatedTo": [],
@@ -34,8 +34,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -35,8 +35,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -32,8 +32,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -29,8 +29,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -36,8 +36,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -34,8 +34,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -32,8 +32,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -32,8 +32,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -38,8 +38,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -33,8 +33,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -37,8 +37,7 @@
}
},
"Categories": [
"logging",
"threat-detection"
"logging"
],
"DependsOn": [],
"RelatedTo": [],
@@ -36,7 +36,6 @@
}
},
"Categories": [
"threat-detection",
"logging"
],
"DependsOn": [],
@@ -1,31 +1,39 @@
{
"Provider": "aws",
"CheckID": "macie_automated_sensitive_data_discovery_enabled",
"CheckTitle": "Check if Macie automated sensitive data discovery is enabled.",
"CheckTitle": "Macie automated sensitive data discovery is enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"ServiceName": "macie",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "AwsAccount",
"Description": "Check if automated sensitive data discovery is enabled for an Amazon Macie account. The control fails if it isn't enabled.",
"Risk": "Without automated sensitive data discovery, there could be delays in identifying sensitive data, leading to data exposure risks in Amazon S3 buckets.",
"RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/macie-auto-sensitive-data-discovery-check.html",
"ResourceType": "Other",
"Description": "**Amazon Macie** administrator account has **automated sensitive data discovery** enabled for S3 data. The evaluation confirms the feature's status for the account in each Region.",
"Risk": "Without continuous discovery, sensitive S3 objects remain unclassified and unnoticed, weakening **confidentiality**. Over-permissive or public access can persist undetected, enabling **data exfiltration** and delaying containment and **forensic** response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/config/latest/developerguide/macie-auto-sensitive-data-discovery-check.html",
"https://docs.aws.amazon.com/securityhub/latest/userguide/macie-controls.html#macie-2",
"https://docs.aws.amazon.com/macie/latest/user/discovery-asdd-account-enable.html"
],
"Remediation": {
"Code": {
"CLI": "aws macie2 update-automated-discovery-configuration --status ENABLED",
"CLI": "aws macie2 update-automated-discovery-configuration --status ENABLED --region <REGION>",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/macie-controls.html#macie-2",
"Other": "1. In the AWS Console, open Amazon Macie\n2. Select the correct Region from the Region selector\n3. Go to Settings > Automated sensitive data discovery\n4. Click Enable under Status (choose My account if prompted)\n5. Repeat in other Regions where Macie is enabled if needed",
"Terraform": ""
},
"Recommendation": {
"Text": "To enable and configure automated sensitive data discovery jobs for S3 buckets, refer to the Configuring automated sensitive data discovery tutorial.",
"Url": "https://docs.aws.amazon.com/macie/latest/user/discovery-asdd-account-enable.html"
"Text": "Enable and maintain `automated sensitive data discovery` for the Macie administrator across required Regions. Include relevant buckets, tune identifiers and allow lists to reduce noise, and route findings to monitoring. Complement with **least privilege** on S3 and **defense in depth** for data protection.",
"Url": "https://hub.prowler.com/check/macie_automated_sensitive_data_discovery_enabled"
}
},
"Categories": [],
"Categories": [
"secrets"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,31 +1,36 @@
{
"Provider": "aws",
"CheckID": "macie_is_enabled",
"CheckTitle": "Check if Amazon Macie is enabled.",
"CheckTitle": "Amazon Macie is enabled",
"CheckType": [
"Data Protection"
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/AWS Security Best Practices"
],
"ServiceName": "macie",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id",
"Severity": "low",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Other",
"Description": "Check if Amazon Macie is enabled.",
"Risk": "Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to help you discover, monitor and protect your sensitive data in AWS.",
"Description": "**Amazon Macie** status is assessed per region with **S3** presence to determine if sensitive data discovery is operational. The outcome reflects whether Macie is active or in a `PAUSED`/not enabled state for the account and region.",
"Risk": "Without active Macie, sensitive data in **S3** can remain unclassified and exposed. Misconfigured access and public buckets may go undetected, enabling data exfiltration and secret leakage. This degrades confidentiality and widens breach blast radius by reducing visibility into where sensitive data resides.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://aws.amazon.com/macie/getting-started/"
],
"Remediation": {
"Code": {
"CLI": "aws macie2 enable-macie",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"CLI": "aws macie2 enable-macie --region <REGION>",
"NativeIaC": "```yaml\n# CloudFormation: Enable Amazon Macie in this region\nResources:\n MacieSession:\n Type: AWS::Macie::Session\n Properties:\n Status: ENABLED # Critical: Enables Macie for the account in this region\n```",
"Other": "1. Sign in to the AWS Management Console and switch to the target region\n2. Open Amazon Macie\n3. Click Get started or Enable Macie\n4. If Macie shows Suspended/Paused, click Resume Macie\n5. Repeat in each region with S3 buckets as needed",
"Terraform": "```hcl\n# Enables Amazon Macie in this region\nresource \"aws_macie2_account\" \"main\" {\n # Critical: Creating this resource enables Macie for the account in the region\n}\n```"
},
"Recommendation": {
"Text": "Enable Amazon Macie and create appropriate jobs to discover sensitive data.",
"Url": "https://aws.amazon.com/macie/getting-started/"
"Text": "Enable and maintain **Amazon Macie** in all regions hosting **S3** data. Use continuous sensitive data discovery, apply custom classifications for your data types, and route findings to monitoring. Enforce least privilege for Macie access and strengthen defense in depth with restrictive bucket policies and access controls.",
"Url": "https://hub.prowler.com/check/macie_is_enabled"
}
},
"Categories": [
"secrets",
"forensics-ready"
],
"DependsOn": [],
@@ -32,8 +32,7 @@
}
},
"Categories": [
"trust-boundaries",
"threat-detection"
"trust-boundaries"
],
"DependsOn": [],
"RelatedTo": [],
+1
View File
@@ -78,6 +78,7 @@ dependencies = [
"alibabacloud_ecs20140526==7.2.5",
"alibabacloud_sas20181203==6.1.0",
"alibabacloud_oss20190517==1.0.6",
"alibabacloud-gateway-oss-util==0.0.3",
"alibabacloud_actiontrail20200706==2.4.1",
"alibabacloud_cs20151215==6.1.0",
"alibabacloud-rds20140815==12.0.0",
+1 -1
View File
@@ -62,7 +62,7 @@ You are a code reviewer for the Prowler UI project. Analyze the full file conten
**RULES TO CHECK:**
1. React Imports: NO `import * as React` or `import React, {` → Use `import { useState }`
2. TypeScript: NO union types like `type X = "a" | "b"` → Use const-based: `const X = {...} as const`
3. Tailwind: NO `var()` or hex colors in className → Use Tailwind utilities and semantic color classes.
3. Tailwind: NO `var()` or hex colors in className → Use Tailwind utilities and semantic color classes. Exception: `var()` is allowed when passing colors to chart/graph components that require CSS color strings (not Tailwind classes) for their APIs.
4. cn(): Use for merging multiple classes or for conditionals (handles Tailwind conflicts with twMerge) → `cn(BUTTON_STYLES.base, BUTTON_STYLES.active, isLoading && "opacity-50")`
5. React 19: NO `useMemo`/`useCallback` without reason
6. Zod v4: Use `.min(1)` not `.nonempty()`, `z.email()` not `z.string().email()`. All inputs must be validated with Zod.
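Rules 2 and 6 are dense; a hedged sketch of what they prescribe (names invented, Zod v4 assumed), following the same const-based shape as the `TIME_RANGE_OPTIONS` constant later in this change set:

```ts
import { z } from "zod";

// Rule 2: const-based pattern instead of `type Status = "active" | "archived"`
const STATUS = {
  ACTIVE: "active",
  ARCHIVED: "archived",
} as const;
type Status = (typeof STATUS)[keyof typeof STATUS]; // "active" | "archived"

// Rule 6: Zod v4 forms — z.email() and .min(1) rather than the deprecated calls
const userSchema = z.object({
  email: z.email(),
  name: z.string().min(1),
});
type User = z.infer<typeof userSchema>;

const parsed: User = userSchema.parse({ email: "ada@example.com", name: "Ada" });
console.log(parsed.name); // "Ada"
```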
+9
View File
@@ -6,6 +6,8 @@ All notable changes to the **Prowler UI** are documented in this file.
### 🚀 Added
- Risk Plot component with interactive legend and severity navigation to Overview page [(#9469)](https://github.com/prowler-cloud/prowler/pull/9469)
- Navigation progress bar for page transitions using Next.js `onRouterTransitionStart` [(#9465)](https://github.com/prowler-cloud/prowler/pull/9465)
- Finding Severity Over Time chart component to Overview page [(#9405)](https://github.com/prowler-cloud/prowler/pull/9405)
- Attack Surface component to Overview page [(#9412)](https://github.com/prowler-cloud/prowler/pull/9412)
@@ -22,6 +24,13 @@ All notable changes to the **Prowler UI** are documented in this file.
---
## [1.14.3] (Prowler Unreleased)
### 🐞 Fixed
- Show top failed requirements in the compliance-specific view for compliances without sections [(#9471)](https://github.com/prowler-cloud/prowler/pull/9471)
---
## [1.14.2] (Prowler v5.14.2)
### 🐞 Fixed
+1
View File
@@ -3,6 +3,7 @@ export * from "./attack-surface";
export * from "./findings";
export * from "./providers";
export * from "./regions";
export * from "./risk-plot";
export * from "./services";
export * from "./severity-trends";
export * from "./threat-score";
+4
View File
@@ -0,0 +1,4 @@
// Risk Plot Actions
export * from "./risk-plot";
export * from "./risk-plot.adapter";
export * from "./types/risk-plot.types";
@@ -0,0 +1,94 @@
import { getProviderDisplayName } from "@/types/providers";
import type {
ProviderRiskData,
RiskPlotDataResponse,
RiskPlotPoint,
} from "./types/risk-plot.types";
/**
* Calculates percentage with proper rounding.
*/
function calculatePercentage(value: number, total: number): number {
if (total === 0) return 0;
return Math.round((value / total) * 100);
}
/**
* Adapts raw provider risk data to the format expected by RiskPlotClient.
*
* @param providersRiskData - Array of risk data per provider from API
* @returns Formatted data for the Risk Plot scatter chart
*/
export function adaptToRiskPlotData(
providersRiskData: ProviderRiskData[],
): RiskPlotDataResponse {
const points: RiskPlotPoint[] = [];
const providersWithoutData: RiskPlotDataResponse["providersWithoutData"] = [];
for (const providerData of providersRiskData) {
// Skip providers without ThreatScore data (no completed scans)
if (providerData.overallScore === null) {
providersWithoutData.push({
id: providerData.providerId,
name: providerData.providerName,
type: providerData.providerType,
});
continue;
}
// Convert provider type to display name (aws -> AWS, gcp -> Google, etc.)
const providerDisplayName = getProviderDisplayName(
providerData.providerType,
);
// Build severity data for the horizontal bar chart with percentages
let severityData;
let totalFailedFindings = 0;
if (providerData.severity) {
const { critical, high, medium, low, informational } =
providerData.severity;
totalFailedFindings = critical + high + medium + low + informational;
severityData = [
{
name: "Critical",
value: critical,
percentage: calculatePercentage(critical, totalFailedFindings),
},
{
name: "High",
value: high,
percentage: calculatePercentage(high, totalFailedFindings),
},
{
name: "Medium",
value: medium,
percentage: calculatePercentage(medium, totalFailedFindings),
},
{
name: "Low",
value: low,
percentage: calculatePercentage(low, totalFailedFindings),
},
{
name: "Info",
value: informational,
percentage: calculatePercentage(informational, totalFailedFindings),
},
];
}
points.push({
x: providerData.overallScore ?? 0,
y: totalFailedFindings,
provider: providerDisplayName,
name: providerData.providerName,
providerId: providerData.providerId,
severityData,
});
}
return { points, providersWithoutData };
}
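A usage sketch for the adapter with invented inputs, exercising both branches (a provider with a completed scan and one without):

```ts
import { adaptToRiskPlotData } from "./risk-plot.adapter";
import type { ProviderRiskData } from "./types/risk-plot.types";

// Invented sample data.
const sample: ProviderRiskData[] = [
  {
    providerId: "p-1",
    providerType: "aws",
    providerName: "prod-account",
    overallScore: 72.5,
    failedFindings: 40,
    severity: { critical: 4, high: 10, medium: 16, low: 8, informational: 2 },
  },
  {
    providerId: "p-2",
    providerType: "azure",
    providerName: "staging",
    overallScore: null, // no completed scans
    failedFindings: 0,
    severity: null,
  },
];

const { points, providersWithoutData } = adaptToRiskPlotData(sample);
// points[0]: { x: 72.5, y: 40, provider: "AWS", name: "prod-account", ... }
// with severityData annotated as percentages (Critical 10%, High 25%, ...)
// providersWithoutData: [{ id: "p-2", name: "staging", type: "azure" }]
```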
@@ -0,0 +1,69 @@
"use server";
import { getFindingsBySeverity } from "@/actions/overview/findings";
import { getThreatScore } from "@/actions/overview/threat-score";
import { ProviderProps } from "@/types/providers";
import { ProviderRiskData } from "./types/risk-plot.types";
/**
* Fetches risk data for a single provider.
* Combines ThreatScore and Severity data in parallel.
*/
export async function getProviderRiskData(
provider: ProviderProps,
): Promise<ProviderRiskData> {
const providerId = provider.id;
const providerType = provider.attributes.provider;
const providerName = provider.attributes.alias || provider.attributes.uid;
// Fetch ThreatScore and Severity in parallel
const [threatScoreResponse, severityResponse] = await Promise.all([
getThreatScore({
filters: {
provider_id: providerId,
include: "provider",
},
}),
getFindingsBySeverity({
filters: {
"filter[provider_id]": providerId,
"filter[status]": "FAIL",
},
}),
]);
// Extract ThreatScore data
// When filtering by a single provider, the API returns an array with one item (not aggregated)
const threatScoreData = threatScoreResponse?.data?.[0]?.attributes;
const overallScore = threatScoreData?.overall_score
? parseFloat(threatScoreData.overall_score)
: null;
const failedFindings = threatScoreData?.failed_findings ?? 0;
// Extract Severity data
const severityData = severityResponse?.data?.attributes ?? null;
return {
providerId,
providerType,
providerName,
overallScore,
failedFindings,
severity: severityData,
};
}
/**
* Fetches risk data for multiple providers in parallel.
* Used by the Risk Plot SSR component.
*/
export async function getProvidersRiskData(
providers: ProviderProps[],
): Promise<ProviderRiskData[]> {
const riskDataPromises = providers.map((provider) =>
getProviderRiskData(provider),
);
return Promise.all(riskDataPromises);
}
@@ -0,0 +1,58 @@
// Risk Plot Types
// Data structures for the Risk Plot scatter chart
import type { BarDataPoint } from "@/components/graphs/types";
/**
* Represents a single point in the Risk Plot scatter chart.
* Each point represents a provider/account with its risk metrics.
*/
export interface RiskPlotPoint {
/** ThreatScore (0-100 scale, higher = better) */
x: number;
/** Total failed findings count */
y: number;
/** Provider type display name (AWS, Azure, Google, etc.) */
provider: string;
/** Provider alias or UID (account identifier) */
name: string;
/** Provider ID for filtering/navigation */
providerId: string;
/** Severity breakdown for the horizontal bar chart */
severityData?: BarDataPoint[];
}
/**
* Raw data from the API combined for a single provider.
* Used internally before transformation to RiskPlotPoint.
*/
export interface ProviderRiskData {
providerId: string;
providerType: string;
providerName: string;
/** ThreatScore overall_score (0-100 scale) */
overallScore: number | null;
/** Failed findings from ThreatScore snapshot */
failedFindings: number;
/** Severity breakdown */
severity: {
critical: number;
high: number;
medium: number;
low: number;
informational: number;
} | null;
}
/**
* Response structure for risk plot data fetching.
*/
export interface RiskPlotDataResponse {
points: RiskPlotPoint[];
/** Providers that have no data or no completed scans */
providersWithoutData: Array<{
id: string;
name: string;
type: string;
}>;
}
@@ -1,5 +1,9 @@
"use server";
import {
getDateFromForTimeRange,
type TimeRange,
} from "@/app/(prowler)/_new-overview/severity-over-time/_constants/time-range.constants";
import { apiBaseUrl, getAuthHeaders } from "@/lib";
import { handleApiResponse } from "@/lib/server-actions-helper";
@@ -9,20 +13,6 @@ import {
FindingsSeverityOverTimeResponse,
} from "./types";
const TIME_RANGE_VALUES = {
FIVE_DAYS: "5D",
ONE_WEEK: "1W",
ONE_MONTH: "1M",
} as const;
type TimeRange = (typeof TIME_RANGE_VALUES)[keyof typeof TIME_RANGE_VALUES];
const TIME_RANGE_DAYS: Record<TimeRange, number> = {
"5D": 5,
"1W": 7,
"1M": 30,
};
export type SeverityTrendsResult =
| { status: "success"; data: AdaptedSeverityTrendsResponse }
| { status: "empty" }
@@ -76,21 +66,9 @@ export const getSeverityTrendsByTimeRange = async ({
timeRange: TimeRange;
filters?: Record<string, string | string[] | undefined>;
}): Promise<SeverityTrendsResult> => {
const days = TIME_RANGE_DAYS[timeRange];
if (!days) {
console.error("Invalid time range provided");
return { status: "error" };
}
const endDate = new Date();
const startDate = new Date(endDate.getTime() - days * 24 * 60 * 60 * 1000);
const dateFrom = startDate.toISOString().split("T")[0];
const dateFilters = {
...filters,
date_from: dateFrom,
"filter[date_from]": getDateFromForTimeRange(timeRange),
};
return getFindingsSeverityTrends({ filters: dateFilters });
+4 -2
View File
@@ -3,9 +3,10 @@ import "@/styles/globals.css";
import { GoogleTagManager } from "@next/third-parties/google";
import { Metadata, Viewport } from "next";
import { redirect } from "next/navigation";
import { ReactNode } from "react";
import { auth } from "@/auth.config";
import { Toaster } from "@/components/ui";
import { NavigationProgress, Toaster } from "@/components/ui";
import { fontSans } from "@/config/fonts";
import { siteConfig } from "@/config/site";
import { cn } from "@/lib";
@@ -33,7 +34,7 @@ export const viewport: Viewport = {
export default async function RootLayout({
children,
}: {
children: React.ReactNode;
children: ReactNode;
}) {
const session = await auth();
@@ -52,6 +53,7 @@ export default async function RootLayout({
)}
>
<Providers themeProps={{ attribute: "class", defaultTheme: "dark" }}>
<NavigationProgress />
{children}
<Toaster />
<GoogleTagManager
@@ -11,15 +11,15 @@ export const GRAPH_TABS = [
id: "threat-map",
label: "Threat Map",
},
{
id: "risk-plot",
label: "Risk Plot",
},
// TODO: Uncomment when ready to enable other tabs
// {
// id: "risk-radar",
// label: "Risk Radar",
// },
// {
// id: "risk-plot",
// label: "Risk Plot",
// },
] as const;
export type TabId = (typeof GRAPH_TABS)[number]["id"];
@@ -7,9 +7,9 @@ import { GraphsTabsClient } from "./_components/graphs-tabs-client";
import { GRAPH_TABS, type TabId } from "./_config/graphs-tabs-config";
import { FindingsViewSSR } from "./findings-view";
import { RiskPipelineViewSSR } from "./risk-pipeline-view/risk-pipeline-view.ssr";
import { RiskPlotSSR } from "./risk-plot/risk-plot.ssr";
import { ThreatMapViewSSR } from "./threat-map-view/threat-map-view.ssr";
// TODO: Uncomment when ready to enable other tabs
// import { RiskPlotView } from "./risk-plot/risk-plot-view";
// import { RiskRadarViewSSR } from "./risk-radar-view/risk-radar-view.ssr";
const LoadingFallback = () => (
@@ -25,9 +25,9 @@ const GRAPH_COMPONENTS: Record<TabId, GraphComponent> = {
findings: FindingsViewSSR as GraphComponent,
"risk-pipeline": RiskPipelineViewSSR as GraphComponent,
"threat-map": ThreatMapViewSSR as GraphComponent,
"risk-plot": RiskPlotSSR as GraphComponent,
// TODO: Uncomment when ready to enable other tabs
// "risk-radar": RiskRadarViewSSR as GraphComponent,
// "risk-plot": RiskPlotView as GraphComponent,
};
interface GraphsTabsWrapperProps {
@@ -1,9 +1,19 @@
"use client";
/**
* Risk Plot Client Component
*
* NOTE: This component uses CSS variables (var()) for Recharts styling.
* Recharts SVG-based components (Scatter, XAxis, YAxis, CartesianGrid, etc.)
* do not support Tailwind classes and require raw color values or CSS variables.
* This is a documented limitation of the Recharts library.
* @see https://recharts.org/en-US/api
*/
import { useRouter, useSearchParams } from "next/navigation";
import { useState } from "react";
import {
CartesianGrid,
Legend,
ResponsiveContainer,
Scatter,
ScatterChart,
@@ -12,6 +22,7 @@ import {
YAxis,
} from "recharts";
import type { RiskPlotPoint } from "@/actions/overview/risk-plot";
import { HorizontalBarChart } from "@/components/graphs/horizontal-bar-chart";
import { AlertPill } from "@/components/graphs/shared/alert-pill";
import { ChartLegend } from "@/components/graphs/shared/chart-legend";
@@ -19,69 +30,83 @@ import {
AXIS_FONT_SIZE,
CustomXAxisTick,
} from "@/components/graphs/shared/custom-axis-tick";
import { getSeverityColorByRiskScore } from "@/components/graphs/shared/utils";
import type { BarDataPoint } from "@/components/graphs/types";
import { mapProviderFiltersForFindings } from "@/lib/provider-helpers";
import { SEVERITY_FILTER_MAP } from "@/types/severities";
const PROVIDER_COLORS = {
AWS: "var(--color-bg-data-aws)",
Azure: "var(--color-bg-data-azure)",
Google: "var(--color-bg-data-gcp)",
};
// Threat Score colors (0-100 scale, higher = better)
const THREAT_COLORS = {
DANGER: "var(--bg-fail-primary)", // 0-30
WARNING: "var(--bg-warning-primary)", // 31-60
SUCCESS: "var(--bg-pass-primary)", // 61-100
} as const;
export interface ScatterPoint {
x: number;
y: number;
provider: string;
name: string;
severityData?: BarDataPoint[];
/**
* Get color based on ThreatScore (0-100 scale, higher = better)
*/
function getThreatScoreColor(score: number): string {
if (score > 60) return THREAT_COLORS.SUCCESS;
if (score > 30) return THREAT_COLORS.WARNING;
return THREAT_COLORS.DANGER;
}
// Provider colors from globals.css
const PROVIDER_COLORS: Record<string, string> = {
AWS: "var(--bg-data-aws)",
Azure: "var(--bg-data-azure)",
"Google Cloud": "var(--bg-data-gcp)",
Kubernetes: "var(--bg-data-kubernetes)",
"Microsoft 365": "var(--bg-data-m365)",
GitHub: "var(--bg-data-github)",
"MongoDB Atlas": "var(--bg-data-azure)",
"Infrastructure as Code": "var(--bg-data-kubernetes)",
"Oracle Cloud Infrastructure": "var(--bg-data-gcp)",
};
interface RiskPlotClientProps {
data: ScatterPoint[];
data: RiskPlotPoint[];
}
interface TooltipProps {
active?: boolean;
payload?: Array<{ payload: ScatterPoint }>;
payload?: Array<{ payload: RiskPlotPoint }>;
}
interface ScatterDotProps {
// Props that Recharts passes to the shape component
interface RechartsScatterDotProps {
cx: number;
cy: number;
payload: ScatterPoint;
selectedPoint: ScatterPoint | null;
onSelectPoint: (point: ScatterPoint) => void;
allData: ScatterPoint[];
payload: RiskPlotPoint;
}
interface LegendProps {
payload?: Array<{ value: string; color: string }>;
// Extended props for our custom scatter dot component
interface ScatterDotProps extends RechartsScatterDotProps {
selectedPoint: RiskPlotPoint | null;
onSelectPoint: (point: RiskPlotPoint) => void;
allData: RiskPlotPoint[];
selectedProvider: string | null;
}
const CustomTooltip = ({ active, payload }: TooltipProps) => {
if (active && payload && payload.length) {
const data = payload[0].payload;
const severityColor = getSeverityColorByRiskScore(data.x);
if (!active || !payload?.length) return null;
return (
<div className="border-border-neutral-tertiary bg-bg-neutral-tertiary pointer-events-none min-w-[200px] rounded-xl border p-3 shadow-lg">
<p className="text-text-neutral-primary mb-2 text-sm font-semibold">
{data.name}
</p>
<p className="text-text-neutral-secondary text-sm font-medium">
{/* Dynamic color from getSeverityColorByRiskScore - required inline style */}
<span style={{ color: severityColor, fontWeight: "bold" }}>
{data.x}
</span>{" "}
Risk Score
</p>
<div className="mt-2">
<AlertPill value={data.y} />
</div>
const { name, x, y } = payload[0].payload;
const scoreColor = getThreatScoreColor(x);
return (
<div className="border-border-neutral-tertiary bg-bg-neutral-tertiary pointer-events-none min-w-[200px] rounded-xl border p-3 shadow-lg">
<p className="text-text-neutral-primary mb-2 text-sm font-semibold">
{name}
</p>
<p className="text-text-neutral-secondary text-sm font-medium">
<span style={{ color: scoreColor, fontWeight: "bold" }}>{x}%</span>{" "}
Threat Score
</p>
<div className="mt-2">
<AlertPill value={y} />
</div>
);
}
return null;
</div>
);
};
const CustomScatterDot = ({
@@ -91,24 +116,31 @@ const CustomScatterDot = ({
selectedPoint,
onSelectPoint,
allData,
selectedProvider,
}: ScatterDotProps) => {
const isSelected = selectedPoint?.name === payload.name;
const size = isSelected ? 18 : 8;
const selectedColor = "var(--bg-button-primary)"; // emerald-400
const selectedColor = "var(--bg-button-primary)";
const fill = isSelected
? selectedColor
: PROVIDER_COLORS[payload.provider as keyof typeof PROVIDER_COLORS] ||
"var(--color-text-neutral-tertiary)";
: PROVIDER_COLORS[payload.provider] || "var(--color-text-neutral-tertiary)";
const isFaded =
selectedProvider !== null && payload.provider !== selectedProvider;
const handleClick = () => {
const fullDataItem = allData?.find(
(d: ScatterPoint) => d.name === payload.name,
);
const fullDataItem = allData?.find((d) => d.name === payload.name);
onSelectPoint?.(fullDataItem || payload);
};
return (
<g style={{ cursor: "pointer" }} onClick={handleClick}>
<g
style={{
cursor: "pointer",
opacity: isFaded ? 0.2 : 1,
transition: "opacity 0.2s",
}}
onClick={handleClick}
>
{isSelected && (
<>
<circle
@@ -143,60 +175,86 @@ const CustomScatterDot = ({
);
};
const CustomLegend = ({ payload }: LegendProps) => {
const items =
payload?.map((entry: { value: string; color: string }) => ({
label: entry.value,
color: entry.color,
})) || [];
return <ChartLegend items={items} />;
};
/**
* Factory function that creates a scatter dot shape component with closure over selection state.
* Recharts shape prop types the callback parameter as `unknown` due to its flexible API.
* We safely cast to RechartsScatterDotProps since we know the actual shape of props passed by Scatter.
* @see https://recharts.org/en-US/api/Scatter#shape
*/
function createScatterDotShape(
selectedPoint: ScatterPoint | null,
onSelectPoint: (point: ScatterPoint) => void,
allData: ScatterPoint[],
) {
const ScatterDotShape = (props: unknown) => {
const dotProps = props as Omit<
ScatterDotProps,
"selectedPoint" | "onSelectPoint" | "allData"
>;
return (
<CustomScatterDot
{...dotProps}
selectedPoint={selectedPoint}
onSelectPoint={onSelectPoint}
allData={allData}
/>
);
};
selectedPoint: RiskPlotPoint | null,
onSelectPoint: (point: RiskPlotPoint) => void,
allData: RiskPlotPoint[],
selectedProvider: string | null,
): (props: unknown) => React.JSX.Element {
const ScatterDotShape = (props: unknown) => (
<CustomScatterDot
{...(props as RechartsScatterDotProps)}
selectedPoint={selectedPoint}
onSelectPoint={onSelectPoint}
allData={allData}
selectedProvider={selectedProvider}
/>
);
ScatterDotShape.displayName = "ScatterDotShape";
return ScatterDotShape;
}
export function RiskPlotClient({ data }: RiskPlotClientProps) {
const [selectedPoint, setSelectedPoint] = useState<ScatterPoint | null>(null);
const router = useRouter();
const searchParams = useSearchParams();
const [selectedPoint, setSelectedPoint] = useState<RiskPlotPoint | null>(
null,
);
const [selectedProvider, setSelectedProvider] = useState<string | null>(null);
const dataByProvider = data.reduce(
// Group data by provider for separate Scatter series
const dataByProvider = data.reduce<Record<string, RiskPlotPoint[]>>(
(acc, point) => {
const provider = point.provider;
if (!acc[provider]) {
acc[provider] = [];
}
acc[provider].push(point);
(acc[point.provider] ??= []).push(point);
return acc;
},
{} as Record<string, typeof data>,
{},
);
const handleSelectPoint = (point: ScatterPoint) => {
if (selectedPoint?.name === point.name) {
setSelectedPoint(null);
} else {
setSelectedPoint(point);
const providers = Object.keys(dataByProvider);
const handleSelectPoint = (point: RiskPlotPoint) => {
setSelectedPoint((current) =>
current?.name === point.name ? null : point,
);
};
const handleProviderClick = (provider: string) => {
setSelectedProvider((current) => (current === provider ? null : provider));
};
const handleBarClick = (dataPoint: BarDataPoint) => {
if (!selectedPoint) return;
// Build the URL with current filters
const params = new URLSearchParams(searchParams.toString());
// Transform provider filters (provider_id__in -> provider__in)
mapProviderFiltersForFindings(params);
// Add severity filter
const severity = SEVERITY_FILTER_MAP[dataPoint.name];
if (severity) {
params.set("filter[severity__in]", severity);
}
// Add provider filter for the selected point
params.set("filter[provider__in]", selectedPoint.providerId);
// Add exclude muted findings filter
params.set("filter[muted]", "false");
// Filter by FAIL findings
params.set("filter[status__in]", "FAIL");
// Navigate to findings page
router.push(`/findings?${params.toString()}`);
};
return (
@@ -204,26 +262,18 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
<div className="flex flex-1 gap-12">
{/* Plot Section - in Card */}
<div className="flex basis-[70%] flex-col">
<div
className="flex flex-1 flex-col rounded-lg border p-4"
style={{
borderColor: "var(--border-neutral-primary)",
backgroundColor: "var(--bg-neutral-secondary)",
}}
>
<div className="border-border-neutral-primary bg-bg-neutral-secondary flex flex-1 flex-col rounded-lg border p-4">
<div className="mb-4">
<h3
className="text-lg font-semibold"
style={{ color: "var(--text-neutral-primary)" }}
>
<h3 className="text-text-neutral-primary text-lg font-semibold">
Risk Plot
</h3>
<p className="text-text-neutral-tertiary mt-1 text-xs">
Threat Score is severity-weighted, not quantity-based. Higher
severity findings have greater impact on the score.
</p>
</div>
<div
className="relative w-full flex-1"
style={{ minHeight: "400px" }}
>
<div className="relative min-h-[400px] w-full flex-1">
<ResponsiveContainer width="100%" height="100%">
<ScatterChart
margin={{ top: 20, right: 30, bottom: 60, left: 60 }}
@@ -237,24 +287,24 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
<XAxis
type="number"
dataKey="x"
name="Risk Score"
name="Threat Score"
label={{
value: "Risk Score",
value: "Threat Score",
position: "bottom",
offset: 10,
fill: "var(--color-text-neutral-secondary)",
}}
tick={CustomXAxisTick}
tickLine={false}
domain={[0, 10]}
domain={[0, 100]}
axisLine={false}
/>
<YAxis
type="number"
dataKey="y"
name="Failed Findings"
name="Fail Findings"
label={{
value: "Failed Findings",
value: "Fail Findings",
angle: -90,
position: "left",
offset: 10,
@@ -268,30 +318,43 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
axisLine={false}
/>
<Tooltip content={<CustomTooltip />} />
<Legend
content={<CustomLegend />}
wrapperStyle={{ paddingTop: "40px" }}
/>
{Object.entries(dataByProvider).map(([provider, points]) => (
<Scatter
key={provider}
name={provider}
data={points}
fill={
PROVIDER_COLORS[
provider as keyof typeof PROVIDER_COLORS
] || "var(--color-text-neutral-tertiary)"
PROVIDER_COLORS[provider] ||
"var(--color-text-neutral-tertiary)"
}
shape={createScatterDotShape(
selectedPoint,
handleSelectPoint,
data,
selectedProvider,
)}
/>
))}
</ScatterChart>
</ResponsiveContainer>
</div>
{/* Interactive Legend - below chart */}
<div className="mt-4 flex flex-col items-start gap-2">
<p className="text-text-neutral-tertiary pl-2 text-xs">
Click to filter by provider
</p>
<ChartLegend
items={providers.map((p) => ({
label: p,
color:
PROVIDER_COLORS[p] || "var(--color-text-neutral-tertiary)",
dataKey: p,
}))}
selectedItem={selectedProvider}
onItemClick={handleProviderClick}
/>
</div>
</div>
</div>
@@ -300,28 +363,22 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
{selectedPoint && selectedPoint.severityData ? (
<div className="flex w-full flex-col">
<div className="mb-4">
<h4
className="text-base font-semibold"
style={{ color: "var(--text-neutral-primary)" }}
>
<h4 className="text-text-neutral-primary text-base font-semibold">
{selectedPoint.name}
</h4>
<p
className="text-xs"
style={{ color: "var(--text-neutral-tertiary)" }}
>
Risk Score: {selectedPoint.x} | Failed Findings:{" "}
<p className="text-text-neutral-tertiary text-xs">
Threat Score: {selectedPoint.x}% | Fail Findings:{" "}
{selectedPoint.y}
</p>
</div>
<HorizontalBarChart data={selectedPoint.severityData} />
<HorizontalBarChart
data={selectedPoint.severityData}
onBarClick={handleBarClick}
/>
</div>
) : (
<div className="flex w-full items-center justify-center text-center">
<p
className="text-sm"
style={{ color: "var(--text-neutral-tertiary)" }}
>
<p className="text-text-neutral-tertiary text-sm">
Select a point on the plot to view details
</p>
</div>
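For reference, a sketch of the query string `handleBarClick` assembles; the severity mapping and the provider-filter rewrite are shown inline as assumptions, since `SEVERITY_FILTER_MAP` and `mapProviderFiltersForFindings` are defined elsewhere in the codebase:

```ts
// Illustrative only: "Critical" -> "critical" and the
// provider_id__in -> provider__in rewrite are assumptions about those helpers.
const params = new URLSearchParams("filter[provider_id__in]=p-1");

// Inline stand-in for mapProviderFiltersForFindings(params):
const providerIds = params.get("filter[provider_id__in]");
if (providerIds) {
  params.delete("filter[provider_id__in]");
  params.set("filter[provider__in]", providerIds);
}

params.set("filter[severity__in]", "critical"); // assumed SEVERITY_FILTER_MAP["Critical"]
params.set("filter[muted]", "false");
params.set("filter[status__in]", "FAIL");

console.log(`/findings?${params.toString()}`);
// /findings?filter%5Bprovider__in%5D=p-1&filter%5Bseverity__in%5D=critical&filter%5Bmuted%5D=false&filter%5Bstatus__in%5D=FAIL
```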
@@ -1,191 +0,0 @@
import { RiskPlotClient, type ScatterPoint } from "./risk-plot-client";
// Mock data - Risk Score (0-10) vs Failed Findings count
const mockScatterData: ScatterPoint[] = [
{
x: 9.2,
y: 1456,
provider: "AWS",
name: "Amazon RDS",
severityData: [
{ name: "Critical", value: 456 },
{ name: "High", value: 600 },
{ name: "Medium", value: 250 },
{ name: "Low", value: 120 },
{ name: "Info", value: 30 },
],
},
{
x: 8.5,
y: 892,
provider: "AWS",
name: "Amazon EC2",
severityData: [
{ name: "Critical", value: 280 },
{ name: "High", value: 350 },
{ name: "Medium", value: 180 },
{ name: "Low", value: 70 },
{ name: "Info", value: 12 },
],
},
{
x: 7.1,
y: 445,
provider: "AWS",
name: "Amazon S3",
severityData: [
{ name: "Critical", value: 140 },
{ name: "High", value: 180 },
{ name: "Medium", value: 90 },
{ name: "Low", value: 30 },
{ name: "Info", value: 5 },
],
},
{
x: 6.3,
y: 678,
provider: "AWS",
name: "AWS Lambda",
severityData: [
{ name: "Critical", value: 214 },
{ name: "High", value: 270 },
{ name: "Medium", value: 135 },
{ name: "Low", value: 54 },
{ name: "Info", value: 5 },
],
},
{
x: 4.2,
y: 156,
provider: "AWS",
name: "AWS Backup",
severityData: [
{ name: "Critical", value: 49 },
{ name: "High", value: 62 },
{ name: "Medium", value: 31 },
{ name: "Low", value: 12 },
{ name: "Info", value: 2 },
],
},
{
x: 8.8,
y: 1023,
provider: "Azure",
name: "Azure SQL Database",
severityData: [
{ name: "Critical", value: 323 },
{ name: "High", value: 410 },
{ name: "Medium", value: 205 },
{ name: "Low", value: 82 },
{ name: "Info", value: 3 },
],
},
{
x: 7.9,
y: 834,
provider: "Azure",
name: "Azure Virtual Machines",
severityData: [
{ name: "Critical", value: 263 },
{ name: "High", value: 334 },
{ name: "Medium", value: 167 },
{ name: "Low", value: 67 },
{ name: "Info", value: 3 },
],
},
{
x: 6.4,
y: 567,
provider: "Azure",
name: "Azure Storage",
severityData: [
{ name: "Critical", value: 179 },
{ name: "High", value: 227 },
{ name: "Medium", value: 113 },
{ name: "Low", value: 45 },
{ name: "Info", value: 3 },
],
},
{
x: 5.1,
y: 289,
provider: "Azure",
name: "Azure Key Vault",
severityData: [
{ name: "Critical", value: 91 },
{ name: "High", value: 115 },
{ name: "Medium", value: 58 },
{ name: "Low", value: 23 },
{ name: "Info", value: 2 },
],
},
{
x: 7.6,
y: 712,
provider: "Google",
name: "Cloud SQL",
severityData: [
{ name: "Critical", value: 225 },
{ name: "High", value: 285 },
{ name: "Medium", value: 142 },
{ name: "Low", value: 57 },
{ name: "Info", value: 3 },
],
},
{
x: 6.9,
y: 623,
provider: "Google",
name: "Compute Engine",
severityData: [
{ name: "Critical", value: 197 },
{ name: "High", value: 249 },
{ name: "Medium", value: 124 },
{ name: "Low", value: 50 },
{ name: "Info", value: 3 },
],
},
{
x: 5.8,
y: 412,
provider: "Google",
name: "Cloud Storage",
severityData: [
{ name: "Critical", value: 130 },
{ name: "High", value: 165 },
{ name: "Medium", value: 82 },
{ name: "Low", value: 33 },
{ name: "Info", value: 2 },
],
},
{
x: 4.5,
y: 198,
provider: "Google",
name: "Cloud Run",
severityData: [
{ name: "Critical", value: 63 },
{ name: "High", value: 79 },
{ name: "Medium", value: 39 },
{ name: "Low", value: 16 },
{ name: "Info", value: 1 },
],
},
{
x: 8.9,
y: 945,
provider: "AWS",
name: "Amazon RDS Aurora",
severityData: [
{ name: "Critical", value: 299 },
{ name: "High", value: 378 },
{ name: "Medium", value: 189 },
{ name: "Low", value: 76 },
{ name: "Info", value: 3 },
],
},
];
export function RiskPlotView() {
return <RiskPlotClient data={mockScatterData} />;
}
@@ -0,0 +1,91 @@
import { Info } from "lucide-react";
import {
adaptToRiskPlotData,
getProvidersRiskData,
} from "@/actions/overview/risk-plot";
import { getProviders } from "@/actions/providers";
import { SearchParamsProps } from "@/types";
import { pickFilterParams } from "../../_lib/filter-params";
import { RiskPlotClient } from "./risk-plot-client";
export async function RiskPlotSSR({
searchParams,
}: {
searchParams: SearchParamsProps;
}) {
const filters = pickFilterParams(searchParams);
const providerTypeFilter = filters["filter[provider_type__in]"];
const providerIdFilter = filters["filter[provider_id__in]"];
// Fetch all providers
const providersListResponse = await getProviders({ pageSize: 200 });
const allProviders = providersListResponse?.data || [];
// Filter providers based on search params
let filteredProviders = allProviders;
if (providerIdFilter) {
// Filter by specific provider IDs
const selectedIds = String(providerIdFilter)
.split(",")
.map((id) => id.trim());
filteredProviders = allProviders.filter((p) => selectedIds.includes(p.id));
} else if (providerTypeFilter) {
// Filter by provider types
const selectedTypes = String(providerTypeFilter)
.split(",")
.map((t) => t.trim().toLowerCase());
filteredProviders = allProviders.filter((p) =>
selectedTypes.includes(p.attributes.provider.toLowerCase()),
);
}
// No providers to show
if (filteredProviders.length === 0) {
return (
<div className="flex h-[460px] w-full items-center justify-center">
<div className="flex flex-col items-center gap-2 text-center">
<Info size={48} className="text-text-neutral-tertiary" />
<p className="text-text-neutral-secondary text-sm">
No providers available for the selected filters
</p>
</div>
</div>
);
}
// Fetch risk data for all filtered providers in parallel
const providersRiskData = await getProvidersRiskData(filteredProviders);
// Transform to chart format
const { points, providersWithoutData } =
adaptToRiskPlotData(providersRiskData);
// No data available
if (points.length === 0) {
return (
<div className="flex h-[460px] w-full items-center justify-center">
<div className="flex flex-col items-center gap-2 text-center">
<Info size={48} className="text-text-neutral-tertiary" />
<p className="text-text-neutral-secondary text-sm">
No risk data available for the selected providers
</p>
{providersWithoutData.length > 0 && (
<p className="text-text-neutral-tertiary text-xs">
{providersWithoutData.length} provider(s) have no completed scans
</p>
)}
</div>
</div>
);
}
return (
<div className="w-full flex-1 overflow-visible">
<RiskPlotClient data={points} />
</div>
);
}
@@ -7,12 +7,12 @@ import { getSeverityTrendsByTimeRange } from "@/actions/overview/severity-trends
import { LineChart } from "@/components/graphs/line-chart";
import { LineConfig, LineDataPoint } from "@/components/graphs/types";
import {
MUTED_COLOR,
SEVERITY_LEVELS,
SEVERITY_LINE_CONFIGS,
SeverityLevel,
} from "@/types/severities";
import { DEFAULT_TIME_RANGE } from "../_constants/time-range.constants";
import { type TimeRange, TimeRangeSelector } from "./time-range-selector";
interface FindingSeverityOverTimeProps {
@@ -24,7 +24,7 @@ export const FindingSeverityOverTime = ({
}: FindingSeverityOverTimeProps) => {
const router = useRouter();
const searchParams = useSearchParams();
const [timeRange, setTimeRange] = useState<TimeRange>("5D");
const [timeRange, setTimeRange] = useState<TimeRange>(DEFAULT_TIME_RANGE);
const [data, setData] = useState<LineDataPoint[]>(initialData);
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<string | null>(null);
@@ -39,6 +39,9 @@ export const FindingSeverityOverTime = ({
const params = new URLSearchParams();
params.set("filter[inserted_at]", point.date);
// Always filter by FAIL status since this chart shows failed findings
params.set("filter[status__in]", "FAIL");
// Add scan_ids filter
if (
point.scan_ids &&
@@ -96,15 +99,6 @@ export const FindingSeverityOverTime = ({
// Build line configurations from shared severity configs
const lines: LineConfig[] = [...SEVERITY_LINE_CONFIGS];
// Only add muted line if data contains it
if (data.some((item) => item.muted !== undefined)) {
lines.push({
dataKey: "muted",
color: MUTED_COLOR,
label: "Muted",
});
}
// Calculate x-axis interval based on data length to show all labels without overlap
const getXAxisInterval = (): number => {
const dataLength = data.length;
@@ -2,14 +2,12 @@
import { cn } from "@/lib/utils";
-const TIME_RANGE_OPTIONS = {
-  FIVE_DAYS: "5D",
-  ONE_WEEK: "1W",
-  ONE_MONTH: "1M",
-} as const;
+import {
+  TIME_RANGE_OPTIONS,
+  type TimeRange,
+} from "../_constants/time-range.constants";

-export type TimeRange =
-  (typeof TIME_RANGE_OPTIONS)[keyof typeof TIME_RANGE_OPTIONS];
+export type { TimeRange };
interface TimeRangeSelectorProps {
value: TimeRange;
@@ -0,0 +1 @@
export * from "./time-range.constants";
@@ -0,0 +1,23 @@
export const TIME_RANGE_OPTIONS = {
FIVE_DAYS: "5D",
ONE_WEEK: "1W",
ONE_MONTH: "1M",
} as const;
export type TimeRange =
(typeof TIME_RANGE_OPTIONS)[keyof typeof TIME_RANGE_OPTIONS];
export const TIME_RANGE_DAYS: Record<TimeRange, number> = {
"5D": 5,
"1W": 7,
"1M": 30,
};
export const DEFAULT_TIME_RANGE: TimeRange = "5D";
export const getDateFromForTimeRange = (timeRange: TimeRange): string => {
const days = TIME_RANGE_DAYS[timeRange];
const date = new Date();
date.setDate(date.getDate() - days);
return date.toISOString().split("T")[0];
};
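
A quick usage check of the new helper (dates assume a current date of 2025-12-10):

import {
  DEFAULT_TIME_RANGE,
  getDateFromForTimeRange,
  TIME_RANGE_DAYS,
} from "./time-range.constants";

getDateFromForTimeRange("1W"); // "2025-12-03" (7 days back)
getDateFromForTimeRange("1M"); // "2025-11-10" (30 days back)
TIME_RANGE_DAYS[DEFAULT_TIME_RANGE]; // 5

One subtlety: toISOString() formats in UTC, so for users ahead of UTC the computed date can lag their local calendar date by a day; that is usually acceptable for a trends window, but worth knowing.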
@@ -1,10 +1,11 @@
-import { getFindingsSeverityTrends } from "@/actions/overview/severity-trends";
+import { getSeverityTrendsByTimeRange } from "@/actions/overview/severity-trends";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/shadcn";
import { pickFilterParams } from "../_lib/filter-params";
import { SSRComponentProps } from "../_types";
import { FindingSeverityOverTime } from "./_components/finding-severity-over-time";
import { FindingSeverityOverTimeSkeleton } from "./_components/finding-severity-over-time.skeleton";
import { DEFAULT_TIME_RANGE } from "./_constants/time-range.constants";
export { FindingSeverityOverTimeSkeleton };
@@ -25,7 +26,11 @@ export const FindingSeverityOverTimeSSR = async ({
searchParams,
}: SSRComponentProps) => {
const filters = pickFilterParams(searchParams);
-  const result = await getFindingsSeverityTrends({ filters });
+  const result = await getSeverityTrendsByTimeRange({
+    timeRange: DEFAULT_TIME_RANGE,
+    filters,
+  });
if (result.status === "error") {
return <EmptyState message="Failed to load severity trends data" />;
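
getSeverityTrendsByTimeRange itself is not part of this diff. From its call sites and the date_from fix in the commit history, it plausibly derives a date filter from the selected range and delegates to the existing trends action. A hedged sketch — the filter key, the delegation, and the import paths are all assumptions:

import { getFindingsSeverityTrends } from "@/actions/overview/severity-trends";
import {
  getDateFromForTimeRange,
  type TimeRange,
} from "@/app/(app)/overview/_constants/time-range.constants"; // illustrative path

interface SeverityTrendsParams {
  timeRange: TimeRange;
  filters: Record<string, string>;
}

// Sketch only: the real action lives in @/actions/overview/severity-trends.
export async function getSeverityTrendsByTimeRange({
  timeRange,
  filters,
}: SeverityTrendsParams) {
  return getFindingsSeverityTrends({
    filters: {
      // Assumed filter key for the API's default date_from behavior.
      "filter[inserted_at__gte]": getDateFromForTimeRange(timeRange),
      ...filters,
    },
  });
}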
@@ -195,7 +195,7 @@ const SSRComplianceContent = async ({
{ pass: 0, fail: 0, manual: 0 },
);
const accordionItems = mapper.toAccordionItems(data, scanId);
-  const topFailedSections = mapper.getTopFailedSections(data);
+  const topFailedResult = mapper.getTopFailedSections(data);
return (
<div className="flex flex-col gap-8">
@@ -205,7 +205,10 @@ const SSRComplianceContent = async ({
fail={totalRequirements.fail}
manual={totalRequirements.manual}
/>
-        <TopFailedSectionsCard sections={topFailedSections} />
+        <TopFailedSectionsCard
+          sections={topFailedResult.items}
+          dataType={topFailedResult.type}
+        />
{/* <SectionsFailureRateCard categories={categoryHeatmapData} /> */}
</div>
+4 -2
@@ -2,10 +2,11 @@ import "@/styles/globals.css";
import * as Sentry from "@sentry/nextjs";
import { Metadata, Viewport } from "next";
import React from "react";
import { ReactNode } from "react";
import { getProviders } from "@/actions/providers";
import MainLayout from "@/components/ui/main-layout/main-layout";
import { NavigationProgress } from "@/components/ui/navigation-progress";
import { Toaster } from "@/components/ui/toast";
import { fontSans } from "@/config/fonts";
import { siteConfig } from "@/config/site";
@@ -38,7 +39,7 @@ export const viewport: Viewport = {
export default async function RootLayout({
children,
}: {
-  children: React.ReactNode;
+  children: ReactNode;
}) {
const providersData = await getProviders({ page: 1, pageSize: 1 });
const hasProviders = !!(providersData?.data && providersData.data.length > 0);
@@ -54,6 +55,7 @@ export default async function RootLayout({
)}
>
<Providers themeProps={{ attribute: "class", defaultTheme: "dark" }}>
<NavigationProgress />
<StoreInitializer values={{ hasProviders }} />
<MainLayout>{children}</MainLayout>
<Toaster />
@@ -3,14 +3,20 @@
import { HorizontalBarChart } from "@/components/graphs/horizontal-bar-chart";
import { BarDataPoint } from "@/components/graphs/types";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/shadcn";
import { FailedSection } from "@/types/compliance";
import {
FailedSection,
TOP_FAILED_DATA_TYPE,
TopFailedDataType,
} from "@/types/compliance";
interface TopFailedSectionsCardProps {
sections: FailedSection[];
dataType?: TopFailedDataType;
}
export function TopFailedSectionsCard({
sections,
dataType = TOP_FAILED_DATA_TYPE.SECTIONS,
}: TopFailedSectionsCardProps) {
// Transform FailedSection[] to BarDataPoint[]
const total = sections.reduce((sum, section) => sum + section.total, 0);
@@ -22,13 +28,18 @@ export function TopFailedSectionsCard({
color: "var(--bg-fail-primary)",
}));
const title =
dataType === TOP_FAILED_DATA_TYPE.REQUIREMENTS
? "Top Failed Requirements"
: "Top Failed Sections";
return (
<Card
variant="base"
className="flex min-h-[372px] w-full flex-col sm:min-w-[500px]"
>
<CardHeader>
-        <CardTitle>Top Failed Sections</CardTitle>
+        <CardTitle>{title}</CardTitle>
</CardHeader>
<CardContent className="flex flex-1 items-center justify-start">
<HorizontalBarChart data={barData} />
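
Pieced together, the transform in this component reduces to something like the following. BarDataPoint's exact fields are not shown in the diff, so the label/value/percentage keys are assumptions; only the color value is confirmed above:

import { FailedSection } from "@/types/compliance";

// Assumed shape for BarDataPoint; only `color` appears in the diff.
interface BarPoint {
  label: string;
  value: number;
  percentage: number;
  color: string;
}

function toBarData(sections: FailedSection[]): BarPoint[] {
  const total = sections.reduce((sum, section) => sum + section.total, 0);
  return sections.map((section) => ({
    label: section.name,
    value: section.total,
    // Each bar's share of all failures; guard against a zero total.
    percentage: total > 0 ? Math.round((section.total / total) * 100) : 0,
    color: "var(--bg-fail-primary)",
  }));
}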
+34 -8
@@ -68,10 +68,31 @@ const CustomLineTooltip = ({
const typedPayload = payload as unknown as TooltipPayloadItem[];
// Filter payload if a line is selected or hovered
-  const displayPayload = filterLine
+  const filteredPayload = filterLine
? typedPayload.filter((item) => item.dataKey === filterLine)
: typedPayload;
// Sort by severity order: critical, high, medium, low, informational
const severityOrder = [
"critical",
"high",
"medium",
"low",
"informational",
] as const;
const displayPayload = [...filteredPayload].sort((a, b) => {
const aIndex = severityOrder.indexOf(
a.dataKey as (typeof severityOrder)[number],
);
const bIndex = severityOrder.indexOf(
b.dataKey as (typeof severityOrder)[number],
);
// Items not in severityOrder go to the end
if (aIndex === -1) return 1;
if (bIndex === -1) return -1;
return aIndex - bIndex;
});
if (displayPayload.length === 0) {
return null;
}
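
To make the comparator concrete, here is the same ordering applied standalone to sample data keys, including one key outside the severity palette:

const severityOrder = [
  "critical",
  "high",
  "medium",
  "low",
  "informational",
] as const;

const bySeverity = (a: string, b: string): number => {
  const aIndex = severityOrder.indexOf(a as (typeof severityOrder)[number]);
  const bIndex = severityOrder.indexOf(b as (typeof severityOrder)[number]);
  if (aIndex === -1) return 1; // unknown keys sink to the end
  if (bIndex === -1) return -1;
  return aIndex - bIndex;
};

["low", "muted", "critical", "high"].sort(bySeverity);
// => ["critical", "high", "low", "muted"]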
@@ -96,12 +117,17 @@ const CustomLineTooltip = ({
return (
<div key={item.dataKey} className="space-y-1">
<div className="flex items-center gap-2">
-          <div
-            className="h-2 w-2 rounded-full"
-            style={{ backgroundColor: item.stroke }}
-          />
-          <span className="text-text-neutral-primary text-sm">
+          <div className="flex items-center justify-between gap-4">
+            <div className="flex items-center gap-2">
+              <div
+                className="h-2 w-2 rounded-full"
+                style={{ backgroundColor: item.stroke }}
+              />
+              <span className="text-text-neutral-secondary text-sm">
                {item.name}
              </span>
            </div>
+            <span className="text-text-neutral-primary text-sm font-medium">
+              {item.value}
+            </span>
          </div>
@@ -260,7 +286,7 @@ export function LineChart({
<div className="mt-4 flex flex-col items-start gap-2">
<p className="text-text-neutral-tertiary pl-2 text-xs">
-            Click to filter by severity.
+            Click to filter by severity
</p>
<ChartLegend
items={legendItems}
+2 -2
@@ -32,11 +32,11 @@ export function AlertPill({
>
<AlertTriangle
size={iconSize}
style={{ color: "var(--color-text-text-error)" }}
style={{ color: "var(--color-text-error-primary)" }}
/>
<span
className={cn(textSizeClass, "font-semibold")}
style={{ color: "var(--color-text-text-error)" }}
style={{ color: "var(--color-text-error-primary)" }}
>
{value}
</span>
+1
@@ -12,6 +12,7 @@ export * from "./feedback-banner/feedback-banner";
export * from "./headers/navigation-header";
export * from "./label/Label";
export * from "./main-layout/main-layout";
export * from "./navigation-progress";
export * from "./select";
export * from "./sidebar";
export * from "./toast";
@@ -0,0 +1,7 @@
export { NavigationProgress } from "./navigation-progress";
export {
cancelProgress,
completeProgress,
startProgress,
useNavigationProgress,
} from "./use-navigation-progress";
@@ -0,0 +1,42 @@
"use client";
import { useEffect, useState } from "react";
import { cn } from "@/lib";
import { useNavigationProgress } from "./use-navigation-progress";
const HIDE_DELAY_MS = 200;
export function NavigationProgress() {
const { isLoading, progress } = useNavigationProgress();
const [visible, setVisible] = useState(false);
useEffect(() => {
if (isLoading) return setVisible(true);
const timeout = setTimeout(() => setVisible(false), HIDE_DELAY_MS);
return () => clearTimeout(timeout);
}, [isLoading]);
if (!visible) return null;
return (
<div
className="fixed top-0 left-0 z-[99999] h-[3px] w-full"
role="progressbar"
aria-valuenow={progress}
aria-valuemin={0}
aria-valuemax={100}
aria-label="Page loading progress"
>
<div
className={cn(
"bg-button-primary h-full transition-all duration-200 ease-out",
isLoading && "shadow-progress-glow",
)}
style={{ width: `${progress}%` }}
/>
</div>
);
}
@@ -0,0 +1,106 @@
"use client";
import { usePathname, useSearchParams } from "next/navigation";
import { useEffect, useSyncExternalStore } from "react";
interface ProgressState {
isLoading: boolean;
progress: number;
}
// Global state
let state: ProgressState = { isLoading: false, progress: 0 };
const listeners = new Set<() => void>();
let progressInterval: ReturnType<typeof setInterval> | null = null;
let timeoutId: ReturnType<typeof setTimeout> | null = null;
// Cached server snapshot to avoid infinite loop with useSyncExternalStore
const SERVER_SNAPSHOT: ProgressState = { isLoading: false, progress: 0 };
function notify() {
listeners.forEach((listener) => listener());
}
function setState(newState: ProgressState) {
state = newState;
notify();
}
function clearTimers() {
if (progressInterval) {
clearInterval(progressInterval);
progressInterval = null;
}
if (timeoutId) {
clearTimeout(timeoutId);
timeoutId = null;
}
}
/**
* Start the progress bar animation.
* Progress increases quickly at first, then slows down as it approaches 90%.
*/
export function startProgress() {
clearTimers();
setState({ isLoading: true, progress: 0 });
progressInterval = setInterval(() => {
if (state.progress < 90) {
const increment = (90 - state.progress) * 0.1;
setState({
...state,
progress: Math.min(90, state.progress + increment),
});
}
}, 100);
}
/**
* Complete the progress bar animation.
* Jumps to 100% and then hides after a brief delay.
*/
export function completeProgress() {
clearTimers();
setState({ isLoading: false, progress: 100 });
timeoutId = setTimeout(() => {
setState({ isLoading: false, progress: 0 });
timeoutId = null;
}, 200);
}
/**
* Cancel the progress bar immediately without animation.
*/
export function cancelProgress() {
clearTimers();
setState({ isLoading: false, progress: 0 });
}
/**
* Hook to access progress bar state.
* Automatically completes progress when URL changes.
*/
export function useNavigationProgress() {
const pathname = usePathname();
const searchParams = useSearchParams();
const currentState = useSyncExternalStore(
(listener) => {
listeners.add(listener);
return () => listeners.delete(listener);
},
() => state,
() => SERVER_SNAPSHOT,
);
// Complete progress when URL changes (only if currently loading)
useEffect(() => {
if (state.isLoading) {
completeProgress();
}
}, [pathname, searchParams]);
return currentState;
}
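
Because startProgress, completeProgress, and cancelProgress are plain module-level functions rather than hook-bound state, the bar can also be driven outside router transitions, for example around a slow client-side action. A hypothetical component, not part of this change:

"use client";

import {
  completeProgress,
  startProgress,
} from "@/components/ui/navigation-progress";

// Hypothetical: reuse the global progress bar for a long-running export.
export function ExportButton({ onExport }: { onExport: () => Promise<void> }) {
  const handleClick = async () => {
    startProgress();
    try {
      await onExport();
    } finally {
      completeProgress();
    }
  };

  return <button onClick={handleClick}>Export</button>;
}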
+44
@@ -0,0 +1,44 @@
/**
* Next.js Client Instrumentation
*
* This file runs on the client before React hydration.
* Used to set up navigation progress tracking.
*
* @see https://nextjs.org/docs/app/api-reference/file-conventions/instrumentation-client
*/
import {
cancelProgress,
startProgress,
} from "@/components/ui/navigation-progress/use-navigation-progress";
const NAVIGATION_TYPE = {
PUSH: "push",
REPLACE: "replace",
TRAVERSE: "traverse",
} as const;
type NavigationType = (typeof NAVIGATION_TYPE)[keyof typeof NAVIGATION_TYPE];
function getCurrentUrl(): string {
return window.location.pathname + window.location.search;
}
/**
* Called by Next.js when router navigation begins.
* Triggers the navigation progress bar.
*/
export function onRouterTransitionStart(
url: string,
_navigationType: NavigationType,
) {
const currentUrl = getCurrentUrl();
if (url === currentUrl) {
// Same URL - cancel any ongoing progress
cancelProgress();
} else {
// Different URL - start progress
startProgress();
}
}
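
The same-URL guard covers transitions that resolve to the page the user is already on (e.g. re-clicking the active nav item); without it, a previously started bar could sit at ~90% indefinitely. A trace of the two branches, with an assumed starting location:

// Assuming the user is currently on /findings?page=1:
onRouterTransitionStart("/findings?page=1", "push"); // same URL -> cancelProgress()
onRouterTransitionStart("/providers", "push"); // new URL -> startProgress()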
+88 -27
@@ -1,14 +1,65 @@
import {
Category,
CategoryData,
Control,
FailedSection,
Framework,
Requirement,
REQUIREMENT_STATUS,
RequirementItemData,
RequirementsData,
RequirementStatus,
TOP_FAILED_DATA_TYPE,
TopFailedDataType,
TopFailedResult,
} from "@/types/compliance";
// Type for the internal map used in getTopFailedSections
interface FailedSectionData {
total: number;
types: Record<string, number>;
}
/**
* Builds the TopFailedResult from the accumulated map data
*/
const buildTopFailedResult = (
map: Map<string, FailedSectionData>,
type: TopFailedDataType,
): TopFailedResult => ({
items: Array.from(map.entries())
.map(([name, data]): FailedSection => ({ name, ...data }))
.sort((a, b) => b.total - a.total)
.slice(0, 5),
type,
});
/**
* Checks if the framework uses a flat structure (requirements directly on framework)
* vs hierarchical structure (categories -> controls -> requirements)
*/
const hasFlatStructure = (frameworks: Framework[]): boolean =>
frameworks.some(
(framework) =>
(framework.requirements?.length ?? 0) > 0 &&
framework.categories.length === 0,
);
/**
* Increments the failed count for a given name in the map
*/
const incrementFailedCount = (
map: Map<string, FailedSectionData>,
name: string,
type: string,
): void => {
if (!map.has(name)) {
map.set(name, { total: 0, types: {} });
}
const data = map.get(name)!;
data.total += 1;
data.types[type] = (data.types[type] || 0) + 1;
};
export const updateCounters = (
target: { pass: number; fail: number; manual: number },
status: RequirementStatus,
@@ -24,38 +75,45 @@ export const updateCounters = (
export const getTopFailedSections = (
mappedData: Framework[],
-): FailedSection[] => {
-  const failedSectionMap = new Map();
+): TopFailedResult => {
+  const failedSectionMap = new Map<string, FailedSectionData>();
if (hasFlatStructure(mappedData)) {
// Handle flat structure: count failed requirements directly
mappedData.forEach((framework) => {
const directRequirements = framework.requirements ?? [];
directRequirements.forEach((requirement) => {
if (requirement.status === REQUIREMENT_STATUS.FAIL) {
const type =
typeof requirement.type === "string" ? requirement.type : "Fails";
incrementFailedCount(failedSectionMap, requirement.name, type);
}
});
});
return buildTopFailedResult(
failedSectionMap,
TOP_FAILED_DATA_TYPE.REQUIREMENTS,
);
}
// Handle hierarchical structure: count by category (section)
mappedData.forEach((framework) => {
framework.categories.forEach((category) => {
category.controls.forEach((control) => {
control.requirements.forEach((requirement) => {
if (requirement.status === REQUIREMENT_STATUS.FAIL) {
-            const sectionName = category.name;
-            if (!failedSectionMap.has(sectionName)) {
-              failedSectionMap.set(sectionName, { total: 0, types: {} });
-            }
-            const sectionData = failedSectionMap.get(sectionName);
-            sectionData.total += 1;
-            const type = requirement.type || "Fails";
-            sectionData.types[type as string] =
-              (sectionData.types[type as string] || 0) + 1;
+            const type =
+              typeof requirement.type === "string" ? requirement.type : "Fails";
+            incrementFailedCount(failedSectionMap, category.name, type);
}
});
});
});
});
-  // Convert in descending order and slice top 5
-  return Array.from(failedSectionMap.entries())
-    .map(([name, data]) => ({ name, ...data }))
-    .sort((a, b) => b.total - a.total)
-    .slice(0, 5); // Top 5
+  return buildTopFailedResult(failedSectionMap, TOP_FAILED_DATA_TYPE.SECTIONS);
};
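
Although buildTopFailedResult and incrementFailedCount are module-private, a worked example inside this module illustrates the accumulation (requirement names and types are illustrative):

const map = new Map<string, FailedSectionData>();

incrementFailedCount(map, "PI1.1", "Automated");
incrementFailedCount(map, "PI1.1", "Manual");
incrementFailedCount(map, "PI1.2", "Automated");

buildTopFailedResult(map, TOP_FAILED_DATA_TYPE.REQUIREMENTS);
// => {
//   items: [
//     { name: "PI1.1", total: 2, types: { Automated: 1, Manual: 1 } },
//     { name: "PI1.2", total: 1, types: { Automated: 1 } },
//   ],
//   type: "requirements",
// }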
export const calculateCategoryHeatmapData = (
@@ -146,9 +204,9 @@ export const findOrCreateFramework = (
};
export const findOrCreateCategory = (
-  categories: any[],
+  categories: Category[],
  categoryName: string,
-) => {
+): Category => {
let category = categories.find((c) => c.name === categoryName);
if (!category) {
category = {
@@ -163,7 +221,10 @@ export const findOrCreateCategory = (
return category;
};
-export const findOrCreateControl = (controls: any[], controlLabel: string) => {
+export const findOrCreateControl = (
+  controls: Control[],
+  controlLabel: string,
+): Control => {
let control = controls.find((c) => c.label === controlLabel);
if (!control) {
control = {
@@ -178,7 +239,7 @@ export const findOrCreateControl = (controls: any[], controlLabel: string) => {
return control;
};
-export const calculateFrameworkCounters = (frameworks: Framework[]) => {
+export const calculateFrameworkCounters = (frameworks: Framework[]): void => {
frameworks.forEach((framework) => {
// Reset framework counters
framework.pass = 0;
@@ -186,9 +247,9 @@ export const calculateFrameworkCounters = (frameworks: Framework[]) => {
framework.manual = 0;
// Handle flat structure (requirements directly in framework)
-    const directRequirements = (framework as any).requirements || [];
+    const directRequirements = framework.requirements ?? [];
if (directRequirements.length > 0) {
-      directRequirements.forEach((requirement: Requirement) => {
+      directRequirements.forEach((requirement) => {
updateCounters(framework, requirement.status);
});
return;
+15 -15
@@ -1,4 +1,4 @@
import React from "react";
import { createElement, ReactNode } from "react";
import { AWSWellArchitectedCustomDetails } from "@/components/compliance/compliance-custom-details/aws-well-architected-details";
import { C5CustomDetails } from "@/components/compliance/compliance-custom-details/c5-details";
@@ -14,10 +14,10 @@ import { AccordionItemProps } from "@/components/ui/accordion/Accordion";
import {
AttributesData,
CategoryData,
-  FailedSection,
Framework,
Requirement,
RequirementsData,
TopFailedResult,
} from "@/types/compliance";
import {
@@ -74,9 +74,9 @@ export interface ComplianceMapper {
data: Framework[],
scanId: string | undefined,
) => AccordionItemProps[];
-  getTopFailedSections: (mappedData: Framework[]) => FailedSection[];
+  getTopFailedSections: (mappedData: Framework[]) => TopFailedResult;
  calculateCategoryHeatmapData: (complianceData: Framework[]) => CategoryData[];
-  getDetailsComponent: (requirement: Requirement) => React.ReactNode;
+  getDetailsComponent: (requirement: Requirement) => ReactNode;
}
const getDefaultMapper = (): ComplianceMapper => ({
@@ -86,7 +86,7 @@ const getDefaultMapper = (): ComplianceMapper => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-    React.createElement(GenericCustomDetails, { requirement }),
+    createElement(GenericCustomDetails, { requirement }),
});
const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
@@ -97,7 +97,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(C5CustomDetails, { requirement }),
+      createElement(C5CustomDetails, { requirement }),
},
ENS: {
mapComplianceData: mapENSComplianceData,
@@ -106,7 +106,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(ENSCustomDetails, { requirement }),
+      createElement(ENSCustomDetails, { requirement }),
},
ISO27001: {
mapComplianceData: mapISOComplianceData,
@@ -115,7 +115,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(ISOCustomDetails, { requirement }),
+      createElement(ISOCustomDetails, { requirement }),
},
CIS: {
mapComplianceData: mapCISComplianceData,
@@ -124,7 +124,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(CISCustomDetails, { requirement }),
+      createElement(CISCustomDetails, { requirement }),
},
"AWS-Well-Architected-Framework-Security-Pillar": {
mapComplianceData: mapAWSWellArchitectedComplianceData,
@@ -133,7 +133,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(AWSWellArchitectedCustomDetails, { requirement }),
+      createElement(AWSWellArchitectedCustomDetails, { requirement }),
},
"AWS-Well-Architected-Framework-Reliability-Pillar": {
mapComplianceData: mapAWSWellArchitectedComplianceData,
@@ -142,7 +142,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(AWSWellArchitectedCustomDetails, { requirement }),
+      createElement(AWSWellArchitectedCustomDetails, { requirement }),
},
"KISA-ISMS-P": {
mapComplianceData: mapKISAComplianceData,
@@ -151,7 +151,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(KISACustomDetails, { requirement }),
+      createElement(KISACustomDetails, { requirement }),
},
"MITRE-ATTACK": {
mapComplianceData: mapMITREComplianceData,
@@ -159,7 +159,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
getTopFailedSections: getMITRETopFailedSections,
calculateCategoryHeatmapData: calculateMITRECategoryHeatmapData,
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(MITRECustomDetails, { requirement }),
+      createElement(MITRECustomDetails, { requirement }),
},
ProwlerThreatScore: {
mapComplianceData: mapThetaComplianceData,
@@ -168,7 +168,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (complianceData: Framework[]) =>
calculateCategoryHeatmapData(complianceData),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(ThreatCustomDetails, { requirement }),
+      createElement(ThreatCustomDetails, { requirement }),
},
CCC: {
mapComplianceData: mapCCCComplianceData,
@@ -177,7 +177,7 @@ const getComplianceMappers = (): Record<string, ComplianceMapper> => ({
calculateCategoryHeatmapData: (data: Framework[]) =>
calculateCategoryHeatmapData(data),
getDetailsComponent: (requirement: Requirement) =>
-      React.createElement(CCCCustomDetails, { requirement }),
+      createElement(CCCCustomDetails, { requirement }),
},
});
+58 -45
@@ -12,6 +12,8 @@ import {
REQUIREMENT_STATUS,
RequirementsData,
RequirementStatus,
TOP_FAILED_DATA_TYPE,
TopFailedResult,
} from "@/types/compliance";
import {
@@ -20,6 +22,12 @@ import {
findOrCreateFramework,
} from "./commons";
// Type for the internal map used in getTopFailedSections
interface FailedSectionData {
total: number;
types: Record<string, number>;
}
export const mapComplianceData = (
attributesData: AttributesData,
requirementsData: RequirementsData,
@@ -92,9 +100,9 @@ export const mapComplianceData = (
}) || [],
};
-    // Add requirement directly to framework (store in a special property)
-    (framework as any).requirements = (framework as any).requirements || [];
-    (framework as any).requirements.push(requirement);
+    // Add requirement directly to framework (flat structure - no categories)
+    framework.requirements = framework.requirements ?? [];
+    framework.requirements.push(requirement);
}
// Calculate counters using common helper (works with flat structure)
@@ -108,63 +116,63 @@ export const toAccordionItems = (
scanId: string | undefined,
): AccordionItemProps[] => {
return data.flatMap((framework) => {
-    const requirements = (framework as any).requirements || [];
+    const requirements = framework.requirements ?? [];
// Filter out requirements without metadata (can't be displayed in accordion)
const displayableRequirements = requirements.filter(
-      (requirement: Requirement) => requirement.hasMetadata !== false,
+      (requirement) => requirement.hasMetadata !== false,
);
-    return displayableRequirements.map(
-      (requirement: Requirement, i: number) => {
-        const itemKey = `${framework.name}-req-${i}`;
+    return displayableRequirements.map((requirement, i) => {
+      const itemKey = `${framework.name}-req-${i}`;

-        return {
-          key: itemKey,
-          title: (
-            <ComplianceAccordionRequirementTitle
-              type=""
-              name={requirement.name}
-              status={requirement.status as FindingStatus}
-            />
-          ),
-          content: (
-            <ClientAccordionContent
-              key={`content-${itemKey}`}
-              requirement={requirement}
-              scanId={scanId || ""}
-              framework={framework.name}
-              disableFindings={
-                requirement.check_ids.length === 0 && requirement.manual === 0
-              }
-            />
-          ),
-          items: [],
-        };
-      },
-    );
+      return {
+        key: itemKey,
+        title: (
+          <ComplianceAccordionRequirementTitle
+            type=""
+            name={requirement.name}
+            status={requirement.status as FindingStatus}
+          />
+        ),
+        content: (
+          <ClientAccordionContent
+            key={`content-${itemKey}`}
+            requirement={requirement}
+            scanId={scanId || ""}
+            framework={framework.name}
+            disableFindings={
+              requirement.check_ids.length === 0 && requirement.manual === 0
+            }
+          />
+        ),
+        items: [],
+      };
+    });
});
};
// Custom function for MITRE to get top failed sections grouped by tactics
export const getTopFailedSections = (
mappedData: Framework[],
-): FailedSection[] => {
-  const failedSectionMap = new Map();
+): TopFailedResult => {
+  const failedSectionMap = new Map<string, FailedSectionData>();
mappedData.forEach((framework) => {
-    const requirements = (framework as any).requirements || [];
+    const requirements = framework.requirements ?? [];

-    requirements.forEach((requirement: Requirement) => {
+    requirements.forEach((requirement) => {
if (requirement.status === REQUIREMENT_STATUS.FAIL) {
-        const tactics = (requirement.tactics as string[]) || [];
+        const tactics = Array.isArray(requirement.tactics)
+          ? (requirement.tactics as string[])
+          : [];
tactics.forEach((tactic) => {
if (!failedSectionMap.has(tactic)) {
failedSectionMap.set(tactic, { total: 0, types: {} });
}
-          const sectionData = failedSectionMap.get(tactic);
+          const sectionData = failedSectionMap.get(tactic)!;
sectionData.total += 1;
const type = "Fails";
@@ -175,10 +183,13 @@ export const getTopFailedSections = (
});
-  // Convert in descending order and slice top 5
-  return Array.from(failedSectionMap.entries())
-    .map(([name, data]) => ({ name, ...data }))
-    .sort((a, b) => b.total - a.total)
-    .slice(0, 5); // Top 5
+  return {
+    items: Array.from(failedSectionMap.entries())
+      .map(([name, data]): FailedSection => ({ name, ...data }))
+      .sort((a, b) => b.total - a.total)
+      .slice(0, 5),
+    type: TOP_FAILED_DATA_TYPE.SECTIONS,
+  };
};
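
Illustrative input/output for this MITRE variant, which keys failures by tactic (a failed requirement tagged with two tactics is counted once under each). The technique IDs and tactic names below are sample data:

const frameworks = [
  {
    name: "MITRE-ATTACK",
    requirements: [
      // Assuming REQUIREMENT_STATUS.FAIL === "FAIL"
      { name: "T1562", status: "FAIL", tactics: ["defense-evasion"] },
      {
        name: "T1098",
        status: "FAIL",
        tactics: ["persistence", "privilege-escalation"],
      },
      { name: "T1078", status: "PASS", tactics: ["initial-access"] },
    ],
  },
] as unknown as Framework[]; // cast: only the fields used here are populated

getTopFailedSections(frameworks);
// => {
//   items: [
//     { name: "defense-evasion", total: 1, types: { Fails: 1 } },
//     { name: "persistence", total: 1, types: { Fails: 1 } },
//     { name: "privilege-escalation", total: 1, types: { Fails: 1 } },
//   ],
//   type: "sections",
// }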
// Custom function for MITRE to calculate category heatmap data grouped by tactics
@@ -197,10 +208,12 @@ export const calculateCategoryHeatmapData = (
// Aggregate data by tactics
complianceData.forEach((framework) => {
-    const requirements = (framework as any).requirements || [];
+    const requirements = framework.requirements ?? [];

-    requirements.forEach((requirement: Requirement) => {
-      const tactics = (requirement.tactics as string[]) || [];
+    requirements.forEach((requirement) => {
+      const tactics = Array.isArray(requirement.tactics)
+        ? (requirement.tactics as string[])
+        : [];
tactics.forEach((tactic) => {
const existing = tacticMap.get(tactic) || {
+9
@@ -75,6 +75,9 @@
/* Chart Dots */
--chart-dots: var(--color-neutral-200);
/* Progress Bar */
--shadow-progress-glow: 0 0 10px var(--bg-button-primary), 0 0 5px var(--bg-button-primary);
}
/* ===== DARK THEME ===== */
@@ -144,6 +147,9 @@
/* Chart Dots */
--chart-dots: var(--text-neutral-primary);
/* Progress Bar */
--shadow-progress-glow: 0 0 10px var(--bg-button-primary), 0 0 5px var(--bg-button-primary);
}
/* ===== TAILWIND THEME MAPPINGS ===== */
@@ -211,6 +217,9 @@
--color-bg-warning: var(--bg-warning-primary);
--color-bg-fail: var(--bg-fail-primary);
--color-bg-fail-secondary: var(--bg-fail-secondary);
/* Shadows */
--shadow-progress-glow: var(--shadow-progress-glow);
}
/* ===== CONTAINER UTILITY ===== */
+17 -2
@@ -68,12 +68,27 @@ export interface Framework {
fail: number;
manual: number;
categories: Category[];
// Optional: flat structure for frameworks like MITRE that don't have categories
requirements?: Requirement[];
}
export interface FailedSection {
name: string;
total: number;
-  types?: { [key: string]: number };
+  types?: Record<string, number>;
}
export const TOP_FAILED_DATA_TYPE = {
SECTIONS: "sections",
REQUIREMENTS: "requirements",
} as const;
export type TopFailedDataType =
(typeof TOP_FAILED_DATA_TYPE)[keyof typeof TOP_FAILED_DATA_TYPE];
export interface TopFailedResult {
items: FailedSection[];
type: TopFailedDataType;
}
export interface RequirementsTotals {
@@ -92,7 +107,7 @@ export interface ENSAttributesMetadata {
Nivel: string;
Dimensiones: string[];
ModoEjecucion: string;
-  Dependencias: any[];
+  Dependencias: unknown[];
}
export interface ISO27001AttributesMetadata {