mirror of
https://github.com/prowler-cloud/prowler.git
synced 2026-05-14 08:14:28 +00:00
Compare commits
7 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| acee366e82 | |||
| 47d66c9c4c | |||
| 8d41941d22 | |||
| 962c64eae5 | |||
| 7b56f0640f | |||
| 49c75cc418 | |||
| 56bca7c104 |
@@ -32,6 +32,7 @@ Please add a detailed description of how to review this PR.
|
||||
#### API
|
||||
- [ ] Verify if API specs need to be regenerated.
|
||||
- [ ] Check if version updates are required (e.g., specs, Poetry, etc.).
|
||||
- [ ] Query performance validated with `EXPLAIN ANALYZE` for new/modified endpoints. See [Query Performance Guide](https://github.com/prowler-cloud/prowler/blob/master/api/docs/query-performance-guide.md).
|
||||
- [ ] Ensure new entries are added to [CHANGELOG.md](https://github.com/prowler-cloud/prowler/blob/master/api/CHANGELOG.md), if applicable.
|
||||
|
||||
### License
|
||||
|
||||
@@ -48,8 +48,34 @@ jobs:
|
||||
id: set-short-sha
|
||||
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
|
||||
|
||||
container-build-push:
|
||||
notify-release-started:
|
||||
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
needs: setup
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Notify container push started
|
||||
id: slack-notification
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
COMPONENT: API
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
|
||||
|
||||
container-build-push:
|
||||
needs: [setup, notify-release-started]
|
||||
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
|
||||
runs-on: ${{ matrix.runner }}
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -78,21 +104,6 @@ jobs:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Notify container push started
|
||||
id: slack-notification-started
|
||||
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
COMPONENT: API
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
|
||||
|
||||
- name: Build and push API container for ${{ matrix.arch }}
|
||||
id: container-push
|
||||
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
@@ -106,23 +117,6 @@ jobs:
|
||||
cache-from: type=gha,scope=${{ matrix.arch }}
|
||||
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
|
||||
|
||||
- name: Notify container push completed
|
||||
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
|
||||
COMPONENT: API
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
|
||||
step-outcome: ${{ steps.container-push.outcome }}
|
||||
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
|
||||
|
||||
# Create and push multi-architecture manifest
|
||||
create-manifest:
|
||||
needs: [setup, container-build-push]
|
||||
@@ -169,6 +163,40 @@ jobs:
|
||||
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
|
||||
echo "Cleanup completed"
|
||||
|
||||
notify-release-completed:
|
||||
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
needs: [setup, notify-release-started, container-build-push, create-manifest]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Determine overall outcome
|
||||
id: outcome
|
||||
run: |
|
||||
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
|
||||
echo "outcome=success" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "outcome=failure" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Notify container push completed
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
|
||||
COMPONENT: API
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
|
||||
step-outcome: ${{ steps.outcome.outputs.outcome }}
|
||||
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
|
||||
|
||||
trigger-deployment:
|
||||
if: github.event_name == 'push'
|
||||
needs: [setup, container-build-push]
|
||||
|
||||
@@ -47,8 +47,34 @@ jobs:
|
||||
id: set-short-sha
|
||||
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
|
||||
|
||||
container-build-push:
|
||||
notify-release-started:
|
||||
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
needs: setup
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Notify container push started
|
||||
id: slack-notification
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
COMPONENT: MCP
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
|
||||
|
||||
container-build-push:
|
||||
needs: [setup, notify-release-started]
|
||||
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
|
||||
runs-on: ${{ matrix.runner }}
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -76,21 +102,6 @@ jobs:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Notify container push started
|
||||
id: slack-notification-started
|
||||
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
COMPONENT: MCP
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
|
||||
|
||||
- name: Build and push MCP container for ${{ matrix.arch }}
|
||||
id: container-push
|
||||
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
@@ -112,23 +123,6 @@ jobs:
|
||||
cache-from: type=gha,scope=${{ matrix.arch }}
|
||||
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
|
||||
|
||||
- name: Notify container push completed
|
||||
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
|
||||
COMPONENT: MCP
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
|
||||
step-outcome: ${{ steps.container-push.outcome }}
|
||||
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
|
||||
|
||||
# Create and push multi-architecture manifest
|
||||
create-manifest:
|
||||
needs: [setup, container-build-push]
|
||||
@@ -175,6 +169,40 @@ jobs:
|
||||
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
|
||||
echo "Cleanup completed"
|
||||
|
||||
notify-release-completed:
|
||||
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
needs: [setup, notify-release-started, container-build-push, create-manifest]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Determine overall outcome
|
||||
id: outcome
|
||||
run: |
|
||||
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
|
||||
echo "outcome=success" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "outcome=failure" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Notify container push completed
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
|
||||
COMPONENT: MCP
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
|
||||
step-outcome: ${{ steps.outcome.outputs.outcome }}
|
||||
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
|
||||
|
||||
trigger-deployment:
|
||||
if: github.event_name == 'push'
|
||||
needs: [setup, container-build-push]
|
||||
|
||||
@@ -50,30 +50,15 @@ env:
|
||||
AWS_REGION: us-east-1
|
||||
|
||||
jobs:
|
||||
container-build-push:
|
||||
setup:
|
||||
if: github.repository == 'prowler-cloud/prowler'
|
||||
runs-on: ${{ matrix.runner }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
arch: amd64
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
arch: arm64
|
||||
timeout-minutes: 45
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
prowler_version: ${{ steps.get-prowler-version.outputs.prowler_version }}
|
||||
prowler_version_major: ${{ steps.get-prowler-version.outputs.prowler_version_major }}
|
||||
latest_tag: ${{ steps.get-prowler-version.outputs.latest_tag }}
|
||||
stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
|
||||
env:
|
||||
POETRY_VIRTUALENVS_CREATE: 'false'
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
@@ -93,32 +78,24 @@ jobs:
|
||||
run: |
|
||||
PROWLER_VERSION="$(poetry version -s 2>/dev/null)"
|
||||
echo "prowler_version=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"
|
||||
echo "PROWLER_VERSION=${PROWLER_VERSION}" >> "${GITHUB_ENV}"
|
||||
|
||||
# Extract major version
|
||||
PROWLER_VERSION_MAJOR="${PROWLER_VERSION%%.*}"
|
||||
echo "prowler_version_major=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_OUTPUT}"
|
||||
echo "PROWLER_VERSION_MAJOR=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_ENV}"
|
||||
|
||||
# Set version-specific tags
|
||||
case ${PROWLER_VERSION_MAJOR} in
|
||||
3)
|
||||
echo "LATEST_TAG=v3-latest" >> "${GITHUB_ENV}"
|
||||
echo "STABLE_TAG=v3-stable" >> "${GITHUB_ENV}"
|
||||
echo "latest_tag=v3-latest" >> "${GITHUB_OUTPUT}"
|
||||
echo "stable_tag=v3-stable" >> "${GITHUB_OUTPUT}"
|
||||
echo "✓ Prowler v3 detected - tags: v3-latest, v3-stable"
|
||||
;;
|
||||
4)
|
||||
echo "LATEST_TAG=v4-latest" >> "${GITHUB_ENV}"
|
||||
echo "STABLE_TAG=v4-stable" >> "${GITHUB_ENV}"
|
||||
echo "latest_tag=v4-latest" >> "${GITHUB_OUTPUT}"
|
||||
echo "stable_tag=v4-stable" >> "${GITHUB_OUTPUT}"
|
||||
echo "✓ Prowler v4 detected - tags: v4-latest, v4-stable"
|
||||
;;
|
||||
5)
|
||||
echo "LATEST_TAG=latest" >> "${GITHUB_ENV}"
|
||||
echo "STABLE_TAG=stable" >> "${GITHUB_ENV}"
|
||||
echo "latest_tag=latest" >> "${GITHUB_OUTPUT}"
|
||||
echo "stable_tag=stable" >> "${GITHUB_OUTPUT}"
|
||||
echo "✓ Prowler v5 detected - tags: latest, stable"
|
||||
@@ -129,6 +106,53 @@ jobs:
|
||||
;;
|
||||
esac
|
||||
|
||||
notify-release-started:
|
||||
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
needs: setup
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Notify container push started
|
||||
id: slack-notification
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
COMPONENT: SDK
|
||||
RELEASE_TAG: ${{ needs.setup.outputs.prowler_version }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
|
||||
|
||||
container-build-push:
|
||||
needs: [setup, notify-release-started]
|
||||
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
|
||||
runs-on: ${{ matrix.runner }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
arch: amd64
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
arch: arm64
|
||||
timeout-minutes: 45
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
@@ -147,21 +171,6 @@ jobs:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Notify container push started
|
||||
id: slack-notification-started
|
||||
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
COMPONENT: SDK
|
||||
RELEASE_TAG: ${{ env.PROWLER_VERSION }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
|
||||
|
||||
- name: Build and push SDK container for ${{ matrix.arch }}
|
||||
id: container-push
|
||||
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
@@ -172,30 +181,13 @@ jobs:
|
||||
push: true
|
||||
platforms: ${{ matrix.platform }}
|
||||
tags: |
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}-${{ matrix.arch }}
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-${{ matrix.arch }}
|
||||
cache-from: type=gha,scope=${{ matrix.arch }}
|
||||
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
|
||||
|
||||
- name: Notify container push completed
|
||||
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
|
||||
COMPONENT: SDK
|
||||
RELEASE_TAG: ${{ env.PROWLER_VERSION }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
|
||||
step-outcome: ${{ steps.container-push.outcome }}
|
||||
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
|
||||
|
||||
# Create and push multi-architecture manifest
|
||||
create-manifest:
|
||||
needs: [container-build-push]
|
||||
needs: [setup, container-build-push]
|
||||
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
@@ -222,24 +214,24 @@ jobs:
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64 \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64
|
||||
|
||||
- name: Create and push manifests for release event
|
||||
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.prowler_version }} \
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.stable_tag }} \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.prowler_version }} \
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.stable_tag }} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.prowler_version }} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.stable_tag }} \
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.prowler_version }} \
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.stable_tag }} \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64 \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64
|
||||
|
||||
- name: Install regctl
|
||||
if: always()
|
||||
@@ -249,13 +241,47 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
echo "Cleaning up intermediate tags..."
|
||||
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64" || true
|
||||
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64" || true
|
||||
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64" || true
|
||||
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64" || true
|
||||
echo "Cleanup completed"
|
||||
|
||||
notify-release-completed:
|
||||
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
needs: [setup, notify-release-started, container-build-push, create-manifest]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Determine overall outcome
|
||||
id: outcome
|
||||
run: |
|
||||
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
|
||||
echo "outcome=success" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "outcome=failure" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Notify container push completed
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
|
||||
COMPONENT: SDK
|
||||
RELEASE_TAG: ${{ needs.setup.outputs.prowler_version }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
|
||||
step-outcome: ${{ steps.outcome.outputs.outcome }}
|
||||
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
|
||||
|
||||
dispatch-v3-deployment:
|
||||
if: needs.container-build-push.outputs.prowler_version_major == '3'
|
||||
needs: container-build-push
|
||||
if: needs.setup.outputs.prowler_version_major == '3'
|
||||
needs: [setup, container-build-push]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
permissions:
|
||||
@@ -282,4 +308,4 @@ jobs:
|
||||
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
|
||||
repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
|
||||
event-type: dispatch
|
||||
client-payload: '{"version":"release","tag":"${{ needs.container-build-push.outputs.prowler_version }}"}'
|
||||
client-payload: '{"version":"release","tag":"${{ needs.setup.outputs.prowler_version }}"}'
|
||||
|
||||
@@ -82,9 +82,110 @@ jobs:
|
||||
./tests/**/aws/**
|
||||
./poetry.lock
|
||||
|
||||
- name: Resolve AWS services under test
|
||||
if: steps.changed-aws.outputs.any_changed == 'true'
|
||||
id: aws-services
|
||||
shell: bash
|
||||
run: |
|
||||
python3 <<'PY'
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
dependents = {
|
||||
"acm": ["elb"],
|
||||
"autoscaling": ["dynamodb"],
|
||||
"awslambda": ["ec2", "inspector2"],
|
||||
"backup": ["dynamodb", "ec2", "rds"],
|
||||
"cloudfront": ["shield"],
|
||||
"cloudtrail": ["awslambda", "cloudwatch"],
|
||||
"cloudwatch": ["bedrock"],
|
||||
"ec2": ["dlm", "dms", "elbv2", "emr", "inspector2", "rds", "redshift", "route53", "shield", "ssm"],
|
||||
"ecr": ["inspector2"],
|
||||
"elb": ["shield"],
|
||||
"elbv2": ["shield"],
|
||||
"globalaccelerator": ["shield"],
|
||||
"iam": ["bedrock", "cloudtrail", "cloudwatch", "codebuild"],
|
||||
"kafka": ["firehose"],
|
||||
"kinesis": ["firehose"],
|
||||
"kms": ["kafka"],
|
||||
"organizations": ["iam", "servicecatalog"],
|
||||
"route53": ["shield"],
|
||||
"s3": ["bedrock", "cloudfront", "cloudtrail", "macie"],
|
||||
"ssm": ["ec2"],
|
||||
"vpc": ["awslambda", "ec2", "efs", "elasticache", "neptune", "networkfirewall", "rds", "redshift", "workspaces"],
|
||||
"waf": ["elbv2"],
|
||||
"wafv2": ["cognito", "elbv2"],
|
||||
}
|
||||
|
||||
changed_raw = """${{ steps.changed-aws.outputs.all_changed_files }}"""
|
||||
# all_changed_files is space-separated, not newline-separated
|
||||
# Strip leading "./" if present for consistent path handling
|
||||
changed_files = [Path(f.lstrip("./")) for f in changed_raw.split() if f]
|
||||
|
||||
services = set()
|
||||
run_all = False
|
||||
|
||||
for path in changed_files:
|
||||
path_str = path.as_posix()
|
||||
parts = path.parts
|
||||
if path_str.startswith("prowler/providers/aws/services/"):
|
||||
if len(parts) > 4 and "." not in parts[4]:
|
||||
services.add(parts[4])
|
||||
else:
|
||||
run_all = True
|
||||
elif path_str.startswith("tests/providers/aws/services/"):
|
||||
if len(parts) > 4 and "." not in parts[4]:
|
||||
services.add(parts[4])
|
||||
else:
|
||||
run_all = True
|
||||
elif path_str.startswith("prowler/providers/aws/") or path_str.startswith("tests/providers/aws/"):
|
||||
run_all = True
|
||||
|
||||
# Expand with direct dependent services (one level only)
|
||||
# We only test services that directly depend on the changed services,
|
||||
# not transitive dependencies (services that depend on dependents)
|
||||
original_services = set(services)
|
||||
for svc in original_services:
|
||||
for dep in dependents.get(svc, []):
|
||||
services.add(dep)
|
||||
|
||||
if run_all or not services:
|
||||
run_all = True
|
||||
services = set()
|
||||
|
||||
service_paths = " ".join(sorted(f"tests/providers/aws/services/{svc}" for svc in services))
|
||||
|
||||
output_lines = [
|
||||
f"run_all={'true' if run_all else 'false'}",
|
||||
f"services={' '.join(sorted(services))}",
|
||||
f"service_paths={service_paths}",
|
||||
]
|
||||
|
||||
with open(os.environ["GITHUB_OUTPUT"], "a") as gh_out:
|
||||
for line in output_lines:
|
||||
gh_out.write(line + "\n")
|
||||
|
||||
print(f"AWS changed files (filtered): {changed_raw or 'none'}")
|
||||
print(f"Run all AWS tests: {run_all}")
|
||||
if services:
|
||||
print(f"AWS service test paths: {service_paths}")
|
||||
else:
|
||||
print("AWS service test paths: none detected")
|
||||
PY
|
||||
|
||||
- name: Run AWS tests
|
||||
if: steps.changed-aws.outputs.any_changed == 'true'
|
||||
run: poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
|
||||
run: |
|
||||
echo "AWS run_all=${{ steps.aws-services.outputs.run_all }}"
|
||||
echo "AWS service_paths='${{ steps.aws-services.outputs.service_paths }}'"
|
||||
|
||||
if [ "${{ steps.aws-services.outputs.run_all }}" = "true" ]; then
|
||||
poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
|
||||
elif [ -z "${{ steps.aws-services.outputs.service_paths }}" ]; then
|
||||
echo "No AWS service paths detected; skipping AWS tests."
|
||||
else
|
||||
poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml ${{ steps.aws-services.outputs.service_paths }}
|
||||
fi
|
||||
|
||||
- name: Upload AWS coverage to Codecov
|
||||
if: steps.changed-aws.outputs.any_changed == 'true'
|
||||
|
||||
@@ -50,8 +50,34 @@ jobs:
|
||||
id: set-short-sha
|
||||
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
|
||||
|
||||
container-build-push:
|
||||
notify-release-started:
|
||||
if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
needs: setup
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Notify container push started
|
||||
id: slack-notification
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
COMPONENT: UI
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
|
||||
|
||||
container-build-push:
|
||||
needs: [setup, notify-release-started]
|
||||
if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
|
||||
runs-on: ${{ matrix.runner }}
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -80,21 +106,6 @@ jobs:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Notify container push started
|
||||
id: slack-notification-started
|
||||
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
COMPONENT: UI
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
|
||||
|
||||
- name: Build and push UI container for ${{ matrix.arch }}
|
||||
id: container-push
|
||||
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
@@ -111,23 +122,6 @@ jobs:
|
||||
cache-from: type=gha,scope=${{ matrix.arch }}
|
||||
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
|
||||
|
||||
- name: Notify container push completed
|
||||
if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
MESSAGE_TS: ${{ steps.slack-notification-started.outputs.ts }}
|
||||
COMPONENT: UI
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
|
||||
step-outcome: ${{ steps.container-push.outcome }}
|
||||
update-ts: ${{ steps.slack-notification-started.outputs.ts }}
|
||||
|
||||
# Create and push multi-architecture manifest
|
||||
create-manifest:
|
||||
needs: [setup, container-build-push]
|
||||
@@ -174,6 +168,40 @@ jobs:
|
||||
regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
|
||||
echo "Cleanup completed"
|
||||
|
||||
notify-release-completed:
|
||||
if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
needs: [setup, notify-release-started, container-build-push, create-manifest]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Determine overall outcome
|
||||
id: outcome
|
||||
run: |
|
||||
if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
|
||||
echo "outcome=success" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "outcome=failure" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Notify container push completed
|
||||
uses: ./.github/actions/slack-notification
|
||||
env:
|
||||
SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
|
||||
MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
|
||||
COMPONENT: UI
|
||||
RELEASE_TAG: ${{ env.RELEASE_TAG }}
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||
with:
|
||||
slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
|
||||
step-outcome: ${{ steps.outcome.outputs.outcome }}
|
||||
update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
|
||||
|
||||
trigger-deployment:
|
||||
if: github.event_name == 'push'
|
||||
needs: [setup, container-build-push]
|
||||
|
||||
@@ -0,0 +1,457 @@
|
||||
# Query Performance Guide
|
||||
|
||||
## Overview
|
||||
|
||||
This guide explains how to validate query performance when developing new endpoints or modifying existing ones. **This is part of the development process**, not a separate task—just like writing unit tests.
|
||||
|
||||
The goal is simple: ensure PostgreSQL uses indexes correctly for the queries your code generates.
|
||||
|
||||
## When to Validate
|
||||
|
||||
You **must** validate query performance when:
|
||||
|
||||
- Creating a new endpoint that queries the database
|
||||
- Modifying an existing query (adding filters, joins, or sorting)
|
||||
- Adding new indexes
|
||||
- Working on performance-critical endpoints (overviews, findings, resources)
|
||||
|
||||
## Profiling with Django Silk (Recommended)
|
||||
|
||||
[Django Silk](https://github.com/jazzband/django-silk) is the recommended way to profile queries because it captures the actual SQL generated by your code during real HTTP requests. This gives you the most accurate picture of what happens in production.
|
||||
|
||||
### Enabling Silk
|
||||
|
||||
Silk is installed as a dev dependency but disabled by default. To enable it temporarily for profiling:
|
||||
|
||||
#### 1. Add Silk to your local settings
|
||||
|
||||
In `api/src/backend/config/django/devel.py`, add at the end of the file:
|
||||
|
||||
```python
|
||||
# Silk profiler (temporary - remove after profiling)
|
||||
INSTALLED_APPS += ["silk"] # noqa: F405
|
||||
MIDDLEWARE += ["silk.middleware.SilkyMiddleware"] # noqa: F405
|
||||
```
|
||||
|
||||
#### 2. Add Silk URLs
|
||||
|
||||
In `api/src/backend/api/v1/urls.py`, add at the end:
|
||||
|
||||
```python
|
||||
from django.conf import settings
|
||||
|
||||
if settings.DEBUG:
|
||||
urlpatterns += [path("silk/", include("silk.urls", namespace="silk"))]
|
||||
```
|
||||
|
||||
#### 3. Run Silk migrations
|
||||
|
||||
```bash
|
||||
cd api/src/backend
|
||||
poetry run python manage.py migrate --database admin
|
||||
```
|
||||
|
||||
#### 4. Access Silk
|
||||
|
||||
Start the development server and navigate to `http://localhost:8000/api/v1/silk/`
|
||||
|
||||
### Using Silk
|
||||
|
||||
1. Make requests to the endpoint you want to profile
|
||||
2. Open Silk UI and find your request
|
||||
3. Click on the request to see all SQL queries executed
|
||||
4. For each query, you can see:
|
||||
- Execution time
|
||||
- Number of similar queries (N+1 detection)
|
||||
- The actual SQL with parameters
|
||||
- **EXPLAIN output** (click on a query to see it)
|
||||
|
||||
### Disabling Silk
|
||||
|
||||
After profiling, **remove the changes** you made to `devel.py` and `urls.py`. Don't commit Silk configuration to the repository.
|
||||
|
||||
## Manual Query Analysis with EXPLAIN ANALYZE
|
||||
|
||||
For quick checks or when you need more control, you can run `EXPLAIN ANALYZE` directly.
|
||||
|
||||
### 1. Get Your Query
|
||||
|
||||
#### Option A: Using Django Shell with RLS
|
||||
|
||||
This approach mirrors how queries run in production with Row Level Security enabled:
|
||||
|
||||
```bash
|
||||
cd api/src/backend
|
||||
poetry run python manage.py shell
|
||||
```
|
||||
|
||||
```python
|
||||
from django.db import connection
|
||||
from api.db_utils import rls_transaction
|
||||
from api.models import Finding
|
||||
|
||||
tenant_id = "your-tenant-uuid"
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
# Build your queryset
|
||||
qs = Finding.objects.filter(status="FAIL").order_by("-inserted_at")[:25]
|
||||
|
||||
# Force evaluation
|
||||
list(qs)
|
||||
|
||||
# Get the SQL
|
||||
print(connection.queries[-1]['sql'])
|
||||
```
|
||||
|
||||
#### Option B: Print Query Without Execution
|
||||
|
||||
```python
|
||||
from api.models import Finding
|
||||
|
||||
queryset = Finding.objects.filter(status="FAIL")
|
||||
print(queryset.query)
|
||||
```
|
||||
|
||||
> **Note:** This won't include RLS filters, so the actual production query will differ.
|
||||
|
||||
#### Option C: Enable SQL Logging
|
||||
|
||||
Set `DJANGO_LOGGING_LEVEL=DEBUG` in your environment:
|
||||
|
||||
```bash
|
||||
DJANGO_LOGGING_LEVEL=DEBUG poetry run python manage.py runserver
|
||||
```
|
||||
|
||||
### 2. Run EXPLAIN ANALYZE
|
||||
|
||||
Connect to PostgreSQL and run:
|
||||
|
||||
```sql
|
||||
EXPLAIN ANALYZE <your_query>;
|
||||
```
|
||||
|
||||
Or with more details:
|
||||
|
||||
```sql
|
||||
EXPLAIN (ANALYZE, BUFFERS, FORMAT TEXT) <your_query>;
|
||||
```
|
||||
|
||||
#### Running EXPLAIN with RLS Context
|
||||
|
||||
To test with RLS enabled (as it runs in production), set the tenant context first:
|
||||
|
||||
```sql
|
||||
-- Set tenant context
|
||||
SELECT set_config('api.tenant_id', 'your-tenant-uuid', TRUE);
|
||||
|
||||
-- Then run your EXPLAIN ANALYZE
|
||||
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL' LIMIT 25;
|
||||
```
|
||||
|
||||
### 3. Interpret the Results
|
||||
|
||||
#### Good Signs (Index is being used)
|
||||
|
||||
```
|
||||
Index Scan using findings_tenant_status_idx on findings
|
||||
Index Cond: ((tenant_id = '...'::uuid) AND (status = 'FAIL'))
|
||||
Rows Removed by Filter: 0
|
||||
Actual Rows: 150
|
||||
Planning Time: 0.5 ms
|
||||
Execution Time: 2.3 ms
|
||||
```
|
||||
|
||||
#### Bad Signs (Sequential scan - no index)
|
||||
|
||||
```
|
||||
Seq Scan on findings
|
||||
Filter: ((tenant_id = '...'::uuid) AND (status = 'FAIL'))
|
||||
Rows Removed by Filter: 999850
|
||||
Actual Rows: 150
|
||||
Planning Time: 0.3 ms
|
||||
Execution Time: 450.2 ms
|
||||
```
|
||||
|
||||
## Quick Reference: What to Look For
|
||||
|
||||
| What You See | Meaning | Action |
|
||||
|--------------|---------|--------|
|
||||
| `Index Scan` | Index is being used | Good, no action needed |
|
||||
| `Index Only Scan` | Even better - data comes from index only | Good, no action needed |
|
||||
| `Bitmap Index Scan` | Index used, results combined | Usually fine |
|
||||
| `Seq Scan` on large tables | Full table scan, no index | **Needs investigation** |
|
||||
| `Rows Removed by Filter: <high number>` | Fetching too many rows | **Query or index issue** |
|
||||
| High `Execution Time` | Query is slow | **Needs optimization** |
|
||||
|
||||
## Common Issues and Fixes
|
||||
|
||||
### 1. Missing Index
|
||||
|
||||
**Problem:** `Seq Scan` on a filtered column
|
||||
|
||||
```sql
|
||||
-- Bad: No index on status
|
||||
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL';
|
||||
-- Shows: Seq Scan on findings
|
||||
```
|
||||
|
||||
**Fix:** Add an index
|
||||
|
||||
```python
|
||||
# In your model
|
||||
class Meta:
|
||||
indexes = [
|
||||
models.Index(fields=['status'], name='findings_status_idx'),
|
||||
]
|
||||
```
|
||||
|
||||
### 2. Index Not Used Due to Type Mismatch
|
||||
|
||||
**Problem:** Index exists but PostgreSQL doesn't use it
|
||||
|
||||
```sql
|
||||
-- If tenant_id is UUID but you're passing a string without cast
|
||||
WHERE tenant_id = 'some-uuid-string'
|
||||
```
|
||||
|
||||
**Fix:** Ensure proper type casting in your queries
|
||||
|
||||
### 3. Index Not Used Due to Function Call
|
||||
|
||||
**Problem:** Wrapping column in a function prevents index usage
|
||||
|
||||
```sql
|
||||
-- Bad: Index on inserted_at won't be used
|
||||
WHERE DATE(inserted_at) = '2024-01-01'
|
||||
|
||||
-- Good: Use range instead
|
||||
WHERE inserted_at >= '2024-01-01' AND inserted_at < '2024-01-02'
|
||||
```
|
||||
|
||||
### 4. Wrong Index for Sorting
|
||||
|
||||
**Problem:** Query is sorted but index doesn't match sort order
|
||||
|
||||
```sql
|
||||
-- If you have ORDER BY inserted_at DESC
|
||||
-- You need an index with DESC or PostgreSQL will sort in memory
|
||||
```
|
||||
|
||||
**Fix:** Create index with matching sort order
|
||||
|
||||
```python
|
||||
class Meta:
|
||||
indexes = [
|
||||
models.Index(fields=['-inserted_at'], name='findings_inserted_desc_idx'),
|
||||
]
|
||||
```
|
||||
|
||||
### 5. Composite Index Column Order
|
||||
|
||||
**Problem:** Index exists but columns are in wrong order
|
||||
|
||||
```sql
|
||||
-- Index on (tenant_id, scan_id)
|
||||
-- This query WON'T use the index efficiently:
|
||||
WHERE scan_id = '...'
|
||||
|
||||
-- This query WILL use the index:
|
||||
WHERE tenant_id = '...' AND scan_id = '...'
|
||||
```
|
||||
|
||||
**Rule:** A composite index can only be used efficiently when its leftmost column(s) appear in your WHERE clause; columns further to the right cannot be matched on their own.
|
||||
|
||||
## RLS (Row Level Security) Considerations
|
||||
|
||||
Prowler uses Row Level Security via PostgreSQL's `set_config`. When analyzing queries, remember:
|
||||
|
||||
1. RLS policies add implicit `WHERE tenant_id = current_tenant()` to queries
|
||||
2. Always test with RLS enabled (how it runs in production)
|
||||
3. Ensure `tenant_id` is the **first column** in composite indexes
|
||||
|
||||
### Using rls_transaction in Code
|
||||
|
||||
The `rls_transaction` context manager from `api.db_utils` sets the tenant context for all queries within its scope:
|
||||
|
||||
```python
|
||||
from api.db_utils import rls_transaction
|
||||
from api.models import Finding
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
# All queries here will have RLS applied
|
||||
qs = Finding.objects.filter(status="FAIL")
|
||||
list(qs) # Execute
|
||||
```
|
||||
|
||||
### Using RLS in Raw SQL (psql)
|
||||
|
||||
```sql
|
||||
-- Set tenant context for the transaction
|
||||
SELECT set_config('api.tenant_id', 'your-tenant-uuid', TRUE);
|
||||
|
||||
-- Now RLS policies will filter by this tenant
|
||||
EXPLAIN ANALYZE SELECT * FROM findings WHERE status = 'FAIL';
|
||||
```
|
||||
|
||||
### Index Design for RLS
|
||||
|
||||
Since every query includes `tenant_id` via RLS, your composite indexes should **always start with `tenant_id`**:
|
||||
|
||||
```python
|
||||
class Meta:
|
||||
indexes = [
|
||||
# Good: tenant_id first
|
||||
models.Index(fields=['tenant_id', 'status', 'severity']),
|
||||
|
||||
# Bad: tenant_id not first - RLS queries won't use this efficiently
|
||||
models.Index(fields=['status', 'tenant_id']),
|
||||
]
|
||||
```
|
||||
|
||||
## Test Data Requirements
|
||||
|
||||
The amount of test data you need depends on what you're testing. PostgreSQL's query planner considers table statistics, index definitions, and data distribution when choosing execution plans.
|
||||
|
||||
### Important Considerations
|
||||
|
||||
1. **Small datasets may not use indexes**: PostgreSQL may choose a sequential scan over an index scan if the table is small enough that scanning it directly is faster. This is expected behavior.
|
||||
|
||||
2. **Data must exist in the tables you're querying**: If your endpoint queries `findings`, `resources`, `scans`, or other tables, ensure those tables have data. Use the `findings` management command to generate test data:
|
||||
|
||||
```bash
|
||||
cd api/src/backend
|
||||
poetry run python manage.py findings \
|
||||
--tenant <TENANT_ID> \
|
||||
--findings 1000 \
|
||||
--resources 500 \
|
||||
--batch 500
|
||||
```
|
||||
|
||||
3. **Update table statistics**: After inserting test data, run `ANALYZE` to update PostgreSQL's statistics:
|
||||
|
||||
```sql
|
||||
ANALYZE findings;
|
||||
ANALYZE resources;
|
||||
ANALYZE scans;
|
||||
-- Add other tables as needed
|
||||
```
|
||||
|
||||
4. **Test with realistic data distribution**: If your query filters by a specific value (e.g., `status='FAIL'`), ensure your test data includes a realistic mix of values.
|
||||
|
||||
### When Index Usage Matters Most
|
||||
|
||||
Focus on validating index usage when:
|
||||
|
||||
- The table will have thousands or millions of rows in production
|
||||
- The query is called frequently (list endpoints, dashboards)
|
||||
- The query has multiple filters or joins
|
||||
|
||||
For small lookup tables or infrequently-called endpoints, sequential scans may be acceptable.
|
||||
|
||||
## Performance Checklist for PRs
|
||||
|
||||
Before submitting a PR that adds or modifies database queries:
|
||||
|
||||
- [ ] Profiled queries with Silk or `EXPLAIN ANALYZE`
|
||||
- [ ] Verified indexes are being used (no unexpected `Seq Scan` on large tables)
|
||||
- [ ] Checked `Rows Removed by Filter` is reasonable
|
||||
- [ ] Tested with RLS enabled
|
||||
- [ ] For critical endpoints: documented the query plan in the PR
|
||||
|
||||
## Useful Commands
|
||||
|
||||
### Update Table Statistics
|
||||
|
||||
```sql
|
||||
ANALYZE findings;
|
||||
ANALYZE resources;
|
||||
```
|
||||
|
||||
### See Existing Indexes
|
||||
|
||||
```sql
|
||||
SELECT indexname, indexdef
|
||||
FROM pg_indexes
|
||||
WHERE tablename = 'findings';
|
||||
```
|
||||
|
||||
### See Index Usage Stats
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
schemaname,
|
||||
tablename,
|
||||
indexname,
|
||||
idx_scan,
|
||||
idx_tup_read,
|
||||
idx_tup_fetch
|
||||
FROM pg_stat_user_indexes
|
||||
WHERE tablename = 'findings'
|
||||
ORDER BY idx_scan DESC;
|
||||
```
|
||||
|
||||
### Check Table Size
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
relname as table_name,
|
||||
pg_size_pretty(pg_total_relation_size(relid)) as total_size
|
||||
FROM pg_catalog.pg_statio_user_tables
|
||||
WHERE relname IN ('findings', 'resources', 'scans')
|
||||
ORDER BY pg_total_relation_size(relid) DESC;
|
||||
```
|
||||
|
||||
## Working with Partitioned Tables
|
||||
|
||||
The `findings` and `resource_finding_mappings` tables are partitioned. When adding indexes, use the helper functions from `api.db_utils`:
|
||||
|
||||
### Adding Indexes to Partitions
|
||||
|
||||
```python
|
||||
# In a migration file
|
||||
from functools import partial
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
from api.db_utils import create_index_on_partitions, drop_index_on_partitions
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
atomic = False # Required for CONCURRENTLY
|
||||
|
||||
dependencies = [
|
||||
("api", "XXXX_previous_migration"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(
|
||||
partial(
|
||||
create_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="my_new_idx",
|
||||
columns="tenant_id, status, severity",
|
||||
all_partitions=False, # Only current/future partitions
|
||||
),
|
||||
reverse_code=partial(
|
||||
drop_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="my_new_idx",
|
||||
),
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
### Parameters
|
||||
|
||||
- `all_partitions=False` (default): Only creates indexes on current and future partitions. Use this for new indexes to avoid maintenance overhead on old data.
|
||||
- `all_partitions=True`: Creates indexes on all partitions. Use when migrating critical existing indexes.
|
||||
|
||||
See [Partitions Documentation](./partitions.md) for more details on partitioning strategy.
|
||||
|
||||
## Further Reading
|
||||
|
||||
- [Django Silk Documentation](https://github.com/jazzband/django-silk)
|
||||
- [PostgreSQL EXPLAIN Documentation](https://www.postgresql.org/docs/current/sql-explain.html)
|
||||
- [Using EXPLAIN](https://www.postgresql.org/docs/current/using-explain.html)
|
||||
- [Index Types in PostgreSQL](https://www.postgresql.org/docs/current/indexes-types.html)
|
||||
- [Prowler Partitions Documentation](./partitions.md)
|
||||
@@ -6,6 +6,7 @@ All notable changes to the **Prowler UI** are documented in this file.
|
||||
|
||||
### 🚀 Added
|
||||
|
||||
- Risk Plot component with interactive legend and severity navigation to Overview page [(#9469)](https://github.com/prowler-cloud/prowler/pull/9469)
|
||||
- Navigation progress bar for page transitions using Next.js `onRouterTransitionStart` [(#9465)](https://github.com/prowler-cloud/prowler/pull/9465)
|
||||
- Finding Severity Over Time chart component to Overview page [(#9405)](https://github.com/prowler-cloud/prowler/pull/9405)
|
||||
- Attack Surface component to Overview page [(#9412)](https://github.com/prowler-cloud/prowler/pull/9412)
|
||||
|
||||
@@ -3,6 +3,7 @@ export * from "./attack-surface";
|
||||
export * from "./findings";
|
||||
export * from "./providers";
|
||||
export * from "./regions";
|
||||
export * from "./risk-plot";
|
||||
export * from "./services";
|
||||
export * from "./severity-trends";
|
||||
export * from "./threat-score";
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
// Risk Plot Actions
|
||||
export * from "./risk-plot";
|
||||
export * from "./risk-plot.adapter";
|
||||
export * from "./types/risk-plot.types";
|
||||
@@ -0,0 +1,94 @@
|
||||
import { getProviderDisplayName } from "@/types/providers";
|
||||
|
||||
import type {
|
||||
ProviderRiskData,
|
||||
RiskPlotDataResponse,
|
||||
RiskPlotPoint,
|
||||
} from "./types/risk-plot.types";
|
||||
|
||||
/**
|
||||
* Calculates percentage with proper rounding.
|
||||
*/
|
||||
function calculatePercentage(value: number, total: number): number {
|
||||
if (total === 0) return 0;
|
||||
return Math.round((value / total) * 100);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapts raw provider risk data to the format expected by RiskPlotClient.
|
||||
*
|
||||
* @param providersRiskData - Array of risk data per provider from API
|
||||
* @returns Formatted data for the Risk Plot scatter chart
|
||||
*/
|
||||
export function adaptToRiskPlotData(
|
||||
providersRiskData: ProviderRiskData[],
|
||||
): RiskPlotDataResponse {
|
||||
const points: RiskPlotPoint[] = [];
|
||||
const providersWithoutData: RiskPlotDataResponse["providersWithoutData"] = [];
|
||||
|
||||
for (const providerData of providersRiskData) {
|
||||
// Skip providers without ThreatScore data (no completed scans)
|
||||
if (providerData.overallScore === null) {
|
||||
providersWithoutData.push({
|
||||
id: providerData.providerId,
|
||||
name: providerData.providerName,
|
||||
type: providerData.providerType,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
// Convert provider type to display name (aws -> AWS, gcp -> Google, etc.)
|
||||
const providerDisplayName = getProviderDisplayName(
|
||||
providerData.providerType,
|
||||
);
|
||||
|
||||
// Build severity data for the horizontal bar chart with percentages
|
||||
let severityData;
|
||||
let totalFailedFindings = 0;
|
||||
|
||||
if (providerData.severity) {
|
||||
const { critical, high, medium, low, informational } =
|
||||
providerData.severity;
|
||||
totalFailedFindings = critical + high + medium + low + informational;
|
||||
|
||||
severityData = [
|
||||
{
|
||||
name: "Critical",
|
||||
value: critical,
|
||||
percentage: calculatePercentage(critical, totalFailedFindings),
|
||||
},
|
||||
{
|
||||
name: "High",
|
||||
value: high,
|
||||
percentage: calculatePercentage(high, totalFailedFindings),
|
||||
},
|
||||
{
|
||||
name: "Medium",
|
||||
value: medium,
|
||||
percentage: calculatePercentage(medium, totalFailedFindings),
|
||||
},
|
||||
{
|
||||
name: "Low",
|
||||
value: low,
|
||||
percentage: calculatePercentage(low, totalFailedFindings),
|
||||
},
|
||||
{
|
||||
name: "Info",
|
||||
value: informational,
|
||||
percentage: calculatePercentage(informational, totalFailedFindings),
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
points.push({
|
||||
x: providerData.overallScore ?? 0,
|
||||
y: totalFailedFindings,
|
||||
provider: providerDisplayName,
|
||||
name: providerData.providerName,
|
||||
providerId: providerData.providerId,
|
||||
severityData,
|
||||
});
|
||||
}
|
||||
|
||||
return { points, providersWithoutData };
|
||||
}
|
||||
@@ -0,0 +1,69 @@
|
||||
"use server";
|
||||
|
||||
import { getFindingsBySeverity } from "@/actions/overview/findings";
|
||||
import { getThreatScore } from "@/actions/overview/threat-score";
|
||||
import { ProviderProps } from "@/types/providers";
|
||||
|
||||
import { ProviderRiskData } from "./types/risk-plot.types";
|
||||
|
||||
/**
|
||||
* Fetches risk data for a single provider.
|
||||
* Combines ThreatScore and Severity data in parallel.
|
||||
*/
|
||||
export async function getProviderRiskData(
|
||||
provider: ProviderProps,
|
||||
): Promise<ProviderRiskData> {
|
||||
const providerId = provider.id;
|
||||
const providerType = provider.attributes.provider;
|
||||
const providerName = provider.attributes.alias || provider.attributes.uid;
|
||||
|
||||
// Fetch ThreatScore and Severity in parallel
|
||||
const [threatScoreResponse, severityResponse] = await Promise.all([
|
||||
getThreatScore({
|
||||
filters: {
|
||||
provider_id: providerId,
|
||||
include: "provider",
|
||||
},
|
||||
}),
|
||||
getFindingsBySeverity({
|
||||
filters: {
|
||||
"filter[provider_id]": providerId,
|
||||
"filter[status]": "FAIL",
|
||||
},
|
||||
}),
|
||||
]);
|
||||
|
||||
// Extract ThreatScore data
|
||||
// When filtering by single provider, API returns array with one item (not aggregated)
|
||||
const threatScoreData = threatScoreResponse?.data?.[0]?.attributes;
|
||||
const overallScore = threatScoreData?.overall_score
|
||||
? parseFloat(threatScoreData.overall_score)
|
||||
: null;
|
||||
const failedFindings = threatScoreData?.failed_findings ?? 0;
|
||||
|
||||
// Extract Severity data
|
||||
const severityData = severityResponse?.data?.attributes ?? null;
|
||||
|
||||
return {
|
||||
providerId,
|
||||
providerType,
|
||||
providerName,
|
||||
overallScore,
|
||||
failedFindings,
|
||||
severity: severityData,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetches risk data for multiple providers in parallel.
|
||||
* Used by the Risk Plot SSR component.
|
||||
*/
|
||||
export async function getProvidersRiskData(
|
||||
providers: ProviderProps[],
|
||||
): Promise<ProviderRiskData[]> {
|
||||
const riskDataPromises = providers.map((provider) =>
|
||||
getProviderRiskData(provider),
|
||||
);
|
||||
|
||||
return Promise.all(riskDataPromises);
|
||||
}
|
||||
@@ -0,0 +1,58 @@
|
||||
// Risk Plot Types
// Data structures for the Risk Plot scatter chart

import type { BarDataPoint } from "@/components/graphs/types";

/**
 * Represents a single point in the Risk Plot scatter chart.
 * Each point represents a provider/account with its risk metrics.
 */
export interface RiskPlotPoint {
  /** ThreatScore (0-100 scale, higher = better) */
  x: number;
  /** Total failed findings count */
  y: number;
  /** Provider type display name (AWS, Azure, Google, etc.) */
  provider: string;
  /** Provider alias or UID (account identifier) */
  name: string;
  /** Provider ID for filtering/navigation */
  providerId: string;
  /** Severity breakdown for the horizontal bar chart; absent when the API returned no severity data */
  severityData?: BarDataPoint[];
}

/**
 * Raw data from the API combined for a single provider.
 * Used internally before transformation to RiskPlotPoint.
 */
export interface ProviderRiskData {
  providerId: string;
  providerType: string;
  providerName: string;
  /** ThreatScore overall_score (0-100 scale); null when the provider has no completed scan */
  overallScore: number | null;
  /** Failed findings from ThreatScore snapshot */
  failedFindings: number;
  /** Severity breakdown of failed findings; null when unavailable */
  severity: {
    critical: number;
    high: number;
    medium: number;
    low: number;
    informational: number;
  } | null;
}

/**
 * Response structure for risk plot data fetching.
 */
export interface RiskPlotDataResponse {
  /** Plottable points (one per provider with a ThreatScore) */
  points: RiskPlotPoint[];
  /** Providers that have no data or no completed scans */
  providersWithoutData: Array<{
    id: string;
    name: string;
    type: string;
  }>;
}
|
||||
@@ -1,5 +1,9 @@
|
||||
"use server";
|
||||
|
||||
import {
|
||||
getDateFromForTimeRange,
|
||||
type TimeRange,
|
||||
} from "@/app/(prowler)/_new-overview/severity-over-time/_constants/time-range.constants";
|
||||
import { apiBaseUrl, getAuthHeaders } from "@/lib";
|
||||
import { handleApiResponse } from "@/lib/server-actions-helper";
|
||||
|
||||
@@ -9,20 +13,6 @@ import {
|
||||
FindingsSeverityOverTimeResponse,
|
||||
} from "./types";
|
||||
|
||||
const TIME_RANGE_VALUES = {
|
||||
FIVE_DAYS: "5D",
|
||||
ONE_WEEK: "1W",
|
||||
ONE_MONTH: "1M",
|
||||
} as const;
|
||||
|
||||
type TimeRange = (typeof TIME_RANGE_VALUES)[keyof typeof TIME_RANGE_VALUES];
|
||||
|
||||
const TIME_RANGE_DAYS: Record<TimeRange, number> = {
|
||||
"5D": 5,
|
||||
"1W": 7,
|
||||
"1M": 30,
|
||||
};
|
||||
|
||||
export type SeverityTrendsResult =
|
||||
| { status: "success"; data: AdaptedSeverityTrendsResponse }
|
||||
| { status: "empty" }
|
||||
@@ -76,21 +66,9 @@ export const getSeverityTrendsByTimeRange = async ({
|
||||
timeRange: TimeRange;
|
||||
filters?: Record<string, string | string[] | undefined>;
|
||||
}): Promise<SeverityTrendsResult> => {
|
||||
const days = TIME_RANGE_DAYS[timeRange];
|
||||
|
||||
if (!days) {
|
||||
console.error("Invalid time range provided");
|
||||
return { status: "error" };
|
||||
}
|
||||
|
||||
const endDate = new Date();
|
||||
const startDate = new Date(endDate.getTime() - days * 24 * 60 * 60 * 1000);
|
||||
|
||||
const dateFrom = startDate.toISOString().split("T")[0];
|
||||
|
||||
const dateFilters = {
|
||||
...filters,
|
||||
date_from: dateFrom,
|
||||
"filter[date_from]": getDateFromForTimeRange(timeRange),
|
||||
};
|
||||
|
||||
return getFindingsSeverityTrends({ filters: dateFilters });
|
||||
|
||||
@@ -11,15 +11,15 @@ export const GRAPH_TABS = [
|
||||
id: "threat-map",
|
||||
label: "Threat Map",
|
||||
},
|
||||
{
|
||||
id: "risk-plot",
|
||||
label: "Risk Plot",
|
||||
},
|
||||
// TODO: Uncomment when ready to enable other tabs
|
||||
// {
|
||||
// id: "risk-radar",
|
||||
// label: "Risk Radar",
|
||||
// },
|
||||
// {
|
||||
// id: "risk-plot",
|
||||
// label: "Risk Plot",
|
||||
// },
|
||||
] as const;
|
||||
|
||||
export type TabId = (typeof GRAPH_TABS)[number]["id"];
|
||||
|
||||
@@ -7,9 +7,9 @@ import { GraphsTabsClient } from "./_components/graphs-tabs-client";
|
||||
import { GRAPH_TABS, type TabId } from "./_config/graphs-tabs-config";
|
||||
import { FindingsViewSSR } from "./findings-view";
|
||||
import { RiskPipelineViewSSR } from "./risk-pipeline-view/risk-pipeline-view.ssr";
|
||||
import { RiskPlotSSR } from "./risk-plot/risk-plot.ssr";
|
||||
import { ThreatMapViewSSR } from "./threat-map-view/threat-map-view.ssr";
|
||||
// TODO: Uncomment when ready to enable other tabs
|
||||
// import { RiskPlotView } from "./risk-plot/risk-plot-view";
|
||||
// import { RiskRadarViewSSR } from "./risk-radar-view/risk-radar-view.ssr";
|
||||
|
||||
const LoadingFallback = () => (
|
||||
@@ -25,9 +25,9 @@ const GRAPH_COMPONENTS: Record<TabId, GraphComponent> = {
|
||||
findings: FindingsViewSSR as GraphComponent,
|
||||
"risk-pipeline": RiskPipelineViewSSR as GraphComponent,
|
||||
"threat-map": ThreatMapViewSSR as GraphComponent,
|
||||
"risk-plot": RiskPlotSSR as GraphComponent,
|
||||
// TODO: Uncomment when ready to enable other tabs
|
||||
// "risk-radar": RiskRadarViewSSR as GraphComponent,
|
||||
// "risk-plot": RiskPlotView as GraphComponent,
|
||||
};
|
||||
|
||||
interface GraphsTabsWrapperProps {
|
||||
|
||||
@@ -1,9 +1,19 @@
|
||||
"use client";
|
||||
|
||||
/**
|
||||
* Risk Plot Client Component
|
||||
*
|
||||
* NOTE: This component uses CSS variables (var()) for Recharts styling.
|
||||
* Recharts SVG-based components (Scatter, XAxis, YAxis, CartesianGrid, etc.)
|
||||
* do not support Tailwind classes and require raw color values or CSS variables.
|
||||
* This is a documented limitation of the Recharts library.
|
||||
* @see https://recharts.org/en-US/api
|
||||
*/
|
||||
|
||||
import { useRouter, useSearchParams } from "next/navigation";
|
||||
import { useState } from "react";
|
||||
import {
|
||||
CartesianGrid,
|
||||
Legend,
|
||||
ResponsiveContainer,
|
||||
Scatter,
|
||||
ScatterChart,
|
||||
@@ -12,6 +22,7 @@ import {
|
||||
YAxis,
|
||||
} from "recharts";
|
||||
|
||||
import type { RiskPlotPoint } from "@/actions/overview/risk-plot";
|
||||
import { HorizontalBarChart } from "@/components/graphs/horizontal-bar-chart";
|
||||
import { AlertPill } from "@/components/graphs/shared/alert-pill";
|
||||
import { ChartLegend } from "@/components/graphs/shared/chart-legend";
|
||||
@@ -19,69 +30,83 @@ import {
|
||||
AXIS_FONT_SIZE,
|
||||
CustomXAxisTick,
|
||||
} from "@/components/graphs/shared/custom-axis-tick";
|
||||
import { getSeverityColorByRiskScore } from "@/components/graphs/shared/utils";
|
||||
import type { BarDataPoint } from "@/components/graphs/types";
|
||||
import { mapProviderFiltersForFindings } from "@/lib/provider-helpers";
|
||||
import { SEVERITY_FILTER_MAP } from "@/types/severities";
|
||||
|
||||
const PROVIDER_COLORS = {
|
||||
AWS: "var(--color-bg-data-aws)",
|
||||
Azure: "var(--color-bg-data-azure)",
|
||||
Google: "var(--color-bg-data-gcp)",
|
||||
};
|
||||
// Threat Score colors (0-100 scale, higher = better)
|
||||
const THREAT_COLORS = {
|
||||
DANGER: "var(--bg-fail-primary)", // 0-30
|
||||
WARNING: "var(--bg-warning-primary)", // 31-60
|
||||
SUCCESS: "var(--bg-pass-primary)", // 61-100
|
||||
} as const;
|
||||
|
||||
export interface ScatterPoint {
|
||||
x: number;
|
||||
y: number;
|
||||
provider: string;
|
||||
name: string;
|
||||
severityData?: BarDataPoint[];
|
||||
/**
|
||||
* Get color based on ThreatScore (0-100 scale, higher = better)
|
||||
*/
|
||||
function getThreatScoreColor(score: number): string {
|
||||
if (score > 60) return THREAT_COLORS.SUCCESS;
|
||||
if (score > 30) return THREAT_COLORS.WARNING;
|
||||
return THREAT_COLORS.DANGER;
|
||||
}
|
||||
|
||||
// Provider colors from globals.css
|
||||
const PROVIDER_COLORS: Record<string, string> = {
|
||||
AWS: "var(--bg-data-aws)",
|
||||
Azure: "var(--bg-data-azure)",
|
||||
"Google Cloud": "var(--bg-data-gcp)",
|
||||
Kubernetes: "var(--bg-data-kubernetes)",
|
||||
"Microsoft 365": "var(--bg-data-m365)",
|
||||
GitHub: "var(--bg-data-github)",
|
||||
"MongoDB Atlas": "var(--bg-data-azure)",
|
||||
"Infrastructure as Code": "var(--bg-data-kubernetes)",
|
||||
"Oracle Cloud Infrastructure": "var(--bg-data-gcp)",
|
||||
};
|
||||
|
||||
interface RiskPlotClientProps {
|
||||
data: ScatterPoint[];
|
||||
data: RiskPlotPoint[];
|
||||
}
|
||||
|
||||
interface TooltipProps {
|
||||
active?: boolean;
|
||||
payload?: Array<{ payload: ScatterPoint }>;
|
||||
payload?: Array<{ payload: RiskPlotPoint }>;
|
||||
}
|
||||
|
||||
interface ScatterDotProps {
|
||||
// Props that Recharts passes to the shape component
|
||||
interface RechartsScatterDotProps {
|
||||
cx: number;
|
||||
cy: number;
|
||||
payload: ScatterPoint;
|
||||
selectedPoint: ScatterPoint | null;
|
||||
onSelectPoint: (point: ScatterPoint) => void;
|
||||
allData: ScatterPoint[];
|
||||
payload: RiskPlotPoint;
|
||||
}
|
||||
|
||||
interface LegendProps {
|
||||
payload?: Array<{ value: string; color: string }>;
|
||||
// Extended props for our custom scatter dot component
|
||||
interface ScatterDotProps extends RechartsScatterDotProps {
|
||||
selectedPoint: RiskPlotPoint | null;
|
||||
onSelectPoint: (point: RiskPlotPoint) => void;
|
||||
allData: RiskPlotPoint[];
|
||||
selectedProvider: string | null;
|
||||
}
|
||||
|
||||
const CustomTooltip = ({ active, payload }: TooltipProps) => {
|
||||
if (active && payload && payload.length) {
|
||||
const data = payload[0].payload;
|
||||
const severityColor = getSeverityColorByRiskScore(data.x);
|
||||
if (!active || !payload?.length) return null;
|
||||
|
||||
return (
|
||||
<div className="border-border-neutral-tertiary bg-bg-neutral-tertiary pointer-events-none min-w-[200px] rounded-xl border p-3 shadow-lg">
|
||||
<p className="text-text-neutral-primary mb-2 text-sm font-semibold">
|
||||
{data.name}
|
||||
</p>
|
||||
<p className="text-text-neutral-secondary text-sm font-medium">
|
||||
{/* Dynamic color from getSeverityColorByRiskScore - required inline style */}
|
||||
<span style={{ color: severityColor, fontWeight: "bold" }}>
|
||||
{data.x}
|
||||
</span>{" "}
|
||||
Risk Score
|
||||
</p>
|
||||
<div className="mt-2">
|
||||
<AlertPill value={data.y} />
|
||||
</div>
|
||||
const { name, x, y } = payload[0].payload;
|
||||
const scoreColor = getThreatScoreColor(x);
|
||||
|
||||
return (
|
||||
<div className="border-border-neutral-tertiary bg-bg-neutral-tertiary pointer-events-none min-w-[200px] rounded-xl border p-3 shadow-lg">
|
||||
<p className="text-text-neutral-primary mb-2 text-sm font-semibold">
|
||||
{name}
|
||||
</p>
|
||||
<p className="text-text-neutral-secondary text-sm font-medium">
|
||||
<span style={{ color: scoreColor, fontWeight: "bold" }}>{x}%</span>{" "}
|
||||
Threat Score
|
||||
</p>
|
||||
<div className="mt-2">
|
||||
<AlertPill value={y} />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
return null;
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
const CustomScatterDot = ({
|
||||
@@ -91,24 +116,31 @@ const CustomScatterDot = ({
|
||||
selectedPoint,
|
||||
onSelectPoint,
|
||||
allData,
|
||||
selectedProvider,
|
||||
}: ScatterDotProps) => {
|
||||
const isSelected = selectedPoint?.name === payload.name;
|
||||
const size = isSelected ? 18 : 8;
|
||||
const selectedColor = "var(--bg-button-primary)"; // emerald-400
|
||||
const selectedColor = "var(--bg-button-primary)";
|
||||
const fill = isSelected
|
||||
? selectedColor
|
||||
: PROVIDER_COLORS[payload.provider as keyof typeof PROVIDER_COLORS] ||
|
||||
"var(--color-text-neutral-tertiary)";
|
||||
: PROVIDER_COLORS[payload.provider] || "var(--color-text-neutral-tertiary)";
|
||||
const isFaded =
|
||||
selectedProvider !== null && payload.provider !== selectedProvider;
|
||||
|
||||
const handleClick = () => {
|
||||
const fullDataItem = allData?.find(
|
||||
(d: ScatterPoint) => d.name === payload.name,
|
||||
);
|
||||
const fullDataItem = allData?.find((d) => d.name === payload.name);
|
||||
onSelectPoint?.(fullDataItem || payload);
|
||||
};
|
||||
|
||||
return (
|
||||
<g style={{ cursor: "pointer" }} onClick={handleClick}>
|
||||
<g
|
||||
style={{
|
||||
cursor: "pointer",
|
||||
opacity: isFaded ? 0.2 : 1,
|
||||
transition: "opacity 0.2s",
|
||||
}}
|
||||
onClick={handleClick}
|
||||
>
|
||||
{isSelected && (
|
||||
<>
|
||||
<circle
|
||||
@@ -143,60 +175,86 @@ const CustomScatterDot = ({
|
||||
);
|
||||
};
|
||||
|
||||
const CustomLegend = ({ payload }: LegendProps) => {
|
||||
const items =
|
||||
payload?.map((entry: { value: string; color: string }) => ({
|
||||
label: entry.value,
|
||||
color: entry.color,
|
||||
})) || [];
|
||||
|
||||
return <ChartLegend items={items} />;
|
||||
};
|
||||
|
||||
/**
|
||||
* Factory function that creates a scatter dot shape component with closure over selection state.
|
||||
* Recharts shape prop types the callback parameter as `unknown` due to its flexible API.
|
||||
* We safely cast to RechartsScatterDotProps since we know the actual shape of props passed by Scatter.
|
||||
* @see https://recharts.org/en-US/api/Scatter#shape
|
||||
*/
|
||||
function createScatterDotShape(
|
||||
selectedPoint: ScatterPoint | null,
|
||||
onSelectPoint: (point: ScatterPoint) => void,
|
||||
allData: ScatterPoint[],
|
||||
) {
|
||||
const ScatterDotShape = (props: unknown) => {
|
||||
const dotProps = props as Omit<
|
||||
ScatterDotProps,
|
||||
"selectedPoint" | "onSelectPoint" | "allData"
|
||||
>;
|
||||
return (
|
||||
<CustomScatterDot
|
||||
{...dotProps}
|
||||
selectedPoint={selectedPoint}
|
||||
onSelectPoint={onSelectPoint}
|
||||
allData={allData}
|
||||
/>
|
||||
);
|
||||
};
|
||||
selectedPoint: RiskPlotPoint | null,
|
||||
onSelectPoint: (point: RiskPlotPoint) => void,
|
||||
allData: RiskPlotPoint[],
|
||||
selectedProvider: string | null,
|
||||
): (props: unknown) => React.JSX.Element {
|
||||
const ScatterDotShape = (props: unknown) => (
|
||||
<CustomScatterDot
|
||||
{...(props as RechartsScatterDotProps)}
|
||||
selectedPoint={selectedPoint}
|
||||
onSelectPoint={onSelectPoint}
|
||||
allData={allData}
|
||||
selectedProvider={selectedProvider}
|
||||
/>
|
||||
);
|
||||
ScatterDotShape.displayName = "ScatterDotShape";
|
||||
return ScatterDotShape;
|
||||
}
|
||||
|
||||
export function RiskPlotClient({ data }: RiskPlotClientProps) {
|
||||
const [selectedPoint, setSelectedPoint] = useState<ScatterPoint | null>(null);
|
||||
const router = useRouter();
|
||||
const searchParams = useSearchParams();
|
||||
const [selectedPoint, setSelectedPoint] = useState<RiskPlotPoint | null>(
|
||||
null,
|
||||
);
|
||||
const [selectedProvider, setSelectedProvider] = useState<string | null>(null);
|
||||
|
||||
const dataByProvider = data.reduce(
|
||||
// Group data by provider for separate Scatter series
|
||||
const dataByProvider = data.reduce<Record<string, RiskPlotPoint[]>>(
|
||||
(acc, point) => {
|
||||
const provider = point.provider;
|
||||
if (!acc[provider]) {
|
||||
acc[provider] = [];
|
||||
}
|
||||
acc[provider].push(point);
|
||||
(acc[point.provider] ??= []).push(point);
|
||||
return acc;
|
||||
},
|
||||
{} as Record<string, typeof data>,
|
||||
{},
|
||||
);
|
||||
|
||||
const handleSelectPoint = (point: ScatterPoint) => {
|
||||
if (selectedPoint?.name === point.name) {
|
||||
setSelectedPoint(null);
|
||||
} else {
|
||||
setSelectedPoint(point);
|
||||
const providers = Object.keys(dataByProvider);
|
||||
|
||||
const handleSelectPoint = (point: RiskPlotPoint) => {
|
||||
setSelectedPoint((current) =>
|
||||
current?.name === point.name ? null : point,
|
||||
);
|
||||
};
|
||||
|
||||
const handleProviderClick = (provider: string) => {
|
||||
setSelectedProvider((current) => (current === provider ? null : provider));
|
||||
};
|
||||
|
||||
const handleBarClick = (dataPoint: BarDataPoint) => {
|
||||
if (!selectedPoint) return;
|
||||
|
||||
// Build the URL with current filters
|
||||
const params = new URLSearchParams(searchParams.toString());
|
||||
|
||||
// Transform provider filters (provider_id__in -> provider__in)
|
||||
mapProviderFiltersForFindings(params);
|
||||
|
||||
// Add severity filter
|
||||
const severity = SEVERITY_FILTER_MAP[dataPoint.name];
|
||||
if (severity) {
|
||||
params.set("filter[severity__in]", severity);
|
||||
}
|
||||
|
||||
// Add provider filter for the selected point
|
||||
params.set("filter[provider__in]", selectedPoint.providerId);
|
||||
|
||||
// Add exclude muted findings filter
|
||||
params.set("filter[muted]", "false");
|
||||
|
||||
// Filter by FAIL findings
|
||||
params.set("filter[status__in]", "FAIL");
|
||||
|
||||
// Navigate to findings page
|
||||
router.push(`/findings?${params.toString()}`);
|
||||
};
|
||||
|
||||
return (
|
||||
@@ -204,26 +262,14 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
|
||||
<div className="flex flex-1 gap-12">
|
||||
{/* Plot Section - in Card */}
|
||||
<div className="flex basis-[70%] flex-col">
|
||||
<div
|
||||
className="flex flex-1 flex-col rounded-lg border p-4"
|
||||
style={{
|
||||
borderColor: "var(--border-neutral-primary)",
|
||||
backgroundColor: "var(--bg-neutral-secondary)",
|
||||
}}
|
||||
>
|
||||
<div className="border-border-neutral-primary bg-bg-neutral-secondary flex flex-1 flex-col rounded-lg border p-4">
|
||||
<div className="mb-4">
|
||||
<h3
|
||||
className="text-lg font-semibold"
|
||||
style={{ color: "var(--text-neutral-primary)" }}
|
||||
>
|
||||
<h3 className="text-text-neutral-primary text-lg font-semibold">
|
||||
Risk Plot
|
||||
</h3>
|
||||
</div>
|
||||
|
||||
<div
|
||||
className="relative w-full flex-1"
|
||||
style={{ minHeight: "400px" }}
|
||||
>
|
||||
<div className="relative min-h-[400px] w-full flex-1">
|
||||
<ResponsiveContainer width="100%" height="100%">
|
||||
<ScatterChart
|
||||
margin={{ top: 20, right: 30, bottom: 60, left: 60 }}
|
||||
@@ -237,16 +283,16 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
|
||||
<XAxis
|
||||
type="number"
|
||||
dataKey="x"
|
||||
name="Risk Score"
|
||||
name="Threat Score"
|
||||
label={{
|
||||
value: "Risk Score",
|
||||
value: "Threat Score",
|
||||
position: "bottom",
|
||||
offset: 10,
|
||||
fill: "var(--color-text-neutral-secondary)",
|
||||
}}
|
||||
tick={CustomXAxisTick}
|
||||
tickLine={false}
|
||||
domain={[0, 10]}
|
||||
domain={[0, 100]}
|
||||
axisLine={false}
|
||||
/>
|
||||
<YAxis
|
||||
@@ -268,30 +314,43 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
|
||||
axisLine={false}
|
||||
/>
|
||||
<Tooltip content={<CustomTooltip />} />
|
||||
<Legend
|
||||
content={<CustomLegend />}
|
||||
wrapperStyle={{ paddingTop: "40px" }}
|
||||
/>
|
||||
{Object.entries(dataByProvider).map(([provider, points]) => (
|
||||
<Scatter
|
||||
key={provider}
|
||||
name={provider}
|
||||
data={points}
|
||||
fill={
|
||||
PROVIDER_COLORS[
|
||||
provider as keyof typeof PROVIDER_COLORS
|
||||
] || "var(--color-text-neutral-tertiary)"
|
||||
PROVIDER_COLORS[provider] ||
|
||||
"var(--color-text-neutral-tertiary)"
|
||||
}
|
||||
shape={createScatterDotShape(
|
||||
selectedPoint,
|
||||
handleSelectPoint,
|
||||
data,
|
||||
selectedProvider,
|
||||
)}
|
||||
/>
|
||||
))}
|
||||
</ScatterChart>
|
||||
</ResponsiveContainer>
|
||||
</div>
|
||||
|
||||
{/* Interactive Legend - below chart */}
|
||||
<div className="mt-4 flex flex-col items-start gap-2">
|
||||
<p className="text-text-neutral-tertiary pl-2 text-xs">
|
||||
Click to filter by provider.
|
||||
</p>
|
||||
<ChartLegend
|
||||
items={providers.map((p) => ({
|
||||
label: p,
|
||||
color:
|
||||
PROVIDER_COLORS[p] || "var(--color-text-neutral-tertiary)",
|
||||
dataKey: p,
|
||||
}))}
|
||||
selectedItem={selectedProvider}
|
||||
onItemClick={handleProviderClick}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -300,28 +359,22 @@ export function RiskPlotClient({ data }: RiskPlotClientProps) {
|
||||
{selectedPoint && selectedPoint.severityData ? (
|
||||
<div className="flex w-full flex-col">
|
||||
<div className="mb-4">
|
||||
<h4
|
||||
className="text-base font-semibold"
|
||||
style={{ color: "var(--text-neutral-primary)" }}
|
||||
>
|
||||
<h4 className="text-text-neutral-primary text-base font-semibold">
|
||||
{selectedPoint.name}
|
||||
</h4>
|
||||
<p
|
||||
className="text-xs"
|
||||
style={{ color: "var(--text-neutral-tertiary)" }}
|
||||
>
|
||||
Risk Score: {selectedPoint.x} | Failed Findings:{" "}
|
||||
<p className="text-text-neutral-tertiary text-xs">
|
||||
Threat Score: {selectedPoint.x}% | Failed Findings:{" "}
|
||||
{selectedPoint.y}
|
||||
</p>
|
||||
</div>
|
||||
<HorizontalBarChart data={selectedPoint.severityData} />
|
||||
<HorizontalBarChart
|
||||
data={selectedPoint.severityData}
|
||||
onBarClick={handleBarClick}
|
||||
/>
|
||||
</div>
|
||||
) : (
|
||||
<div className="flex w-full items-center justify-center text-center">
|
||||
<p
|
||||
className="text-sm"
|
||||
style={{ color: "var(--text-neutral-tertiary)" }}
|
||||
>
|
||||
<p className="text-text-neutral-tertiary text-sm">
|
||||
Select a point on the plot to view details
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@@ -1,191 +0,0 @@
|
||||
import { RiskPlotClient, type ScatterPoint } from "./risk-plot-client";
|
||||
|
||||
// Mock data - Risk Score (0-10) vs Failed Findings count
|
||||
const mockScatterData: ScatterPoint[] = [
|
||||
{
|
||||
x: 9.2,
|
||||
y: 1456,
|
||||
provider: "AWS",
|
||||
name: "Amazon RDS",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 456 },
|
||||
{ name: "High", value: 600 },
|
||||
{ name: "Medium", value: 250 },
|
||||
{ name: "Low", value: 120 },
|
||||
{ name: "Info", value: 30 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 8.5,
|
||||
y: 892,
|
||||
provider: "AWS",
|
||||
name: "Amazon EC2",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 280 },
|
||||
{ name: "High", value: 350 },
|
||||
{ name: "Medium", value: 180 },
|
||||
{ name: "Low", value: 70 },
|
||||
{ name: "Info", value: 12 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 7.1,
|
||||
y: 445,
|
||||
provider: "AWS",
|
||||
name: "Amazon S3",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 140 },
|
||||
{ name: "High", value: 180 },
|
||||
{ name: "Medium", value: 90 },
|
||||
{ name: "Low", value: 30 },
|
||||
{ name: "Info", value: 5 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 6.3,
|
||||
y: 678,
|
||||
provider: "AWS",
|
||||
name: "AWS Lambda",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 214 },
|
||||
{ name: "High", value: 270 },
|
||||
{ name: "Medium", value: 135 },
|
||||
{ name: "Low", value: 54 },
|
||||
{ name: "Info", value: 5 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 4.2,
|
||||
y: 156,
|
||||
provider: "AWS",
|
||||
name: "AWS Backup",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 49 },
|
||||
{ name: "High", value: 62 },
|
||||
{ name: "Medium", value: 31 },
|
||||
{ name: "Low", value: 12 },
|
||||
{ name: "Info", value: 2 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 8.8,
|
||||
y: 1023,
|
||||
provider: "Azure",
|
||||
name: "Azure SQL Database",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 323 },
|
||||
{ name: "High", value: 410 },
|
||||
{ name: "Medium", value: 205 },
|
||||
{ name: "Low", value: 82 },
|
||||
{ name: "Info", value: 3 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 7.9,
|
||||
y: 834,
|
||||
provider: "Azure",
|
||||
name: "Azure Virtual Machines",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 263 },
|
||||
{ name: "High", value: 334 },
|
||||
{ name: "Medium", value: 167 },
|
||||
{ name: "Low", value: 67 },
|
||||
{ name: "Info", value: 3 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 6.4,
|
||||
y: 567,
|
||||
provider: "Azure",
|
||||
name: "Azure Storage",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 179 },
|
||||
{ name: "High", value: 227 },
|
||||
{ name: "Medium", value: 113 },
|
||||
{ name: "Low", value: 45 },
|
||||
{ name: "Info", value: 3 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 5.1,
|
||||
y: 289,
|
||||
provider: "Azure",
|
||||
name: "Azure Key Vault",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 91 },
|
||||
{ name: "High", value: 115 },
|
||||
{ name: "Medium", value: 58 },
|
||||
{ name: "Low", value: 23 },
|
||||
{ name: "Info", value: 2 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 7.6,
|
||||
y: 712,
|
||||
provider: "Google",
|
||||
name: "Cloud SQL",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 225 },
|
||||
{ name: "High", value: 285 },
|
||||
{ name: "Medium", value: 142 },
|
||||
{ name: "Low", value: 57 },
|
||||
{ name: "Info", value: 3 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 6.9,
|
||||
y: 623,
|
||||
provider: "Google",
|
||||
name: "Compute Engine",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 197 },
|
||||
{ name: "High", value: 249 },
|
||||
{ name: "Medium", value: 124 },
|
||||
{ name: "Low", value: 50 },
|
||||
{ name: "Info", value: 3 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 5.8,
|
||||
y: 412,
|
||||
provider: "Google",
|
||||
name: "Cloud Storage",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 130 },
|
||||
{ name: "High", value: 165 },
|
||||
{ name: "Medium", value: 82 },
|
||||
{ name: "Low", value: 33 },
|
||||
{ name: "Info", value: 2 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 4.5,
|
||||
y: 198,
|
||||
provider: "Google",
|
||||
name: "Cloud Run",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 63 },
|
||||
{ name: "High", value: 79 },
|
||||
{ name: "Medium", value: 39 },
|
||||
{ name: "Low", value: 16 },
|
||||
{ name: "Info", value: 1 },
|
||||
],
|
||||
},
|
||||
{
|
||||
x: 8.9,
|
||||
y: 945,
|
||||
provider: "AWS",
|
||||
name: "Amazon RDS Aurora",
|
||||
severityData: [
|
||||
{ name: "Critical", value: 299 },
|
||||
{ name: "High", value: 378 },
|
||||
{ name: "Medium", value: 189 },
|
||||
{ name: "Low", value: 76 },
|
||||
{ name: "Info", value: 3 },
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
export function RiskPlotView() {
|
||||
return <RiskPlotClient data={mockScatterData} />;
|
||||
}
|
||||
@@ -0,0 +1,91 @@
|
||||
import { Info } from "lucide-react";
|
||||
|
||||
import {
|
||||
adaptToRiskPlotData,
|
||||
getProvidersRiskData,
|
||||
} from "@/actions/overview/risk-plot";
|
||||
import { getProviders } from "@/actions/providers";
|
||||
import { SearchParamsProps } from "@/types";
|
||||
|
||||
import { pickFilterParams } from "../../_lib/filter-params";
|
||||
import { RiskPlotClient } from "./risk-plot-client";
|
||||
|
||||
export async function RiskPlotSSR({
|
||||
searchParams,
|
||||
}: {
|
||||
searchParams: SearchParamsProps;
|
||||
}) {
|
||||
const filters = pickFilterParams(searchParams);
|
||||
|
||||
const providerTypeFilter = filters["filter[provider_type__in]"];
|
||||
const providerIdFilter = filters["filter[provider_id__in]"];
|
||||
|
||||
// Fetch all providers
|
||||
const providersListResponse = await getProviders({ pageSize: 200 });
|
||||
const allProviders = providersListResponse?.data || [];
|
||||
|
||||
// Filter providers based on search params
|
||||
let filteredProviders = allProviders;
|
||||
|
||||
if (providerIdFilter) {
|
||||
// Filter by specific provider IDs
|
||||
const selectedIds = String(providerIdFilter)
|
||||
.split(",")
|
||||
.map((id) => id.trim());
|
||||
filteredProviders = allProviders.filter((p) => selectedIds.includes(p.id));
|
||||
} else if (providerTypeFilter) {
|
||||
// Filter by provider types
|
||||
const selectedTypes = String(providerTypeFilter)
|
||||
.split(",")
|
||||
.map((t) => t.trim().toLowerCase());
|
||||
filteredProviders = allProviders.filter((p) =>
|
||||
selectedTypes.includes(p.attributes.provider.toLowerCase()),
|
||||
);
|
||||
}
|
||||
|
||||
// No providers to show
|
||||
if (filteredProviders.length === 0) {
|
||||
return (
|
||||
<div className="flex h-[460px] w-full items-center justify-center">
|
||||
<div className="flex flex-col items-center gap-2 text-center">
|
||||
<Info size={48} className="text-text-neutral-tertiary" />
|
||||
<p className="text-text-neutral-secondary text-sm">
|
||||
No providers available for the selected filters
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// Fetch risk data for all filtered providers in parallel
|
||||
const providersRiskData = await getProvidersRiskData(filteredProviders);
|
||||
|
||||
// Transform to chart format
|
||||
const { points, providersWithoutData } =
|
||||
adaptToRiskPlotData(providersRiskData);
|
||||
|
||||
// No data available
|
||||
if (points.length === 0) {
|
||||
return (
|
||||
<div className="flex h-[460px] w-full items-center justify-center">
|
||||
<div className="flex flex-col items-center gap-2 text-center">
|
||||
<Info size={48} className="text-text-neutral-tertiary" />
|
||||
<p className="text-text-neutral-secondary text-sm">
|
||||
No risk data available for the selected providers
|
||||
</p>
|
||||
{providersWithoutData.length > 0 && (
|
||||
<p className="text-text-neutral-tertiary text-xs">
|
||||
{providersWithoutData.length} provider(s) have no completed scans
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="w-full flex-1 overflow-visible">
|
||||
<RiskPlotClient data={points} />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
+2
-1
@@ -13,6 +13,7 @@ import {
|
||||
SeverityLevel,
|
||||
} from "@/types/severities";
|
||||
|
||||
import { DEFAULT_TIME_RANGE } from "../_constants/time-range.constants";
|
||||
import { type TimeRange, TimeRangeSelector } from "./time-range-selector";
|
||||
|
||||
interface FindingSeverityOverTimeProps {
|
||||
@@ -24,7 +25,7 @@ export const FindingSeverityOverTime = ({
|
||||
}: FindingSeverityOverTimeProps) => {
|
||||
const router = useRouter();
|
||||
const searchParams = useSearchParams();
|
||||
const [timeRange, setTimeRange] = useState<TimeRange>("5D");
|
||||
const [timeRange, setTimeRange] = useState<TimeRange>(DEFAULT_TIME_RANGE);
|
||||
const [data, setData] = useState<LineDataPoint[]>(initialData);
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
+5
-7
@@ -2,14 +2,12 @@
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
const TIME_RANGE_OPTIONS = {
|
||||
FIVE_DAYS: "5D",
|
||||
ONE_WEEK: "1W",
|
||||
ONE_MONTH: "1M",
|
||||
} as const;
|
||||
import {
|
||||
TIME_RANGE_OPTIONS,
|
||||
type TimeRange,
|
||||
} from "../_constants/time-range.constants";
|
||||
|
||||
export type TimeRange =
|
||||
(typeof TIME_RANGE_OPTIONS)[keyof typeof TIME_RANGE_OPTIONS];
|
||||
export type { TimeRange };
|
||||
|
||||
interface TimeRangeSelectorProps {
|
||||
value: TimeRange;
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
export * from "./time-range.constants";
|
||||
@@ -0,0 +1,23 @@
|
||||
export const TIME_RANGE_OPTIONS = {
|
||||
FIVE_DAYS: "5D",
|
||||
ONE_WEEK: "1W",
|
||||
ONE_MONTH: "1M",
|
||||
} as const;
|
||||
|
||||
export type TimeRange =
|
||||
(typeof TIME_RANGE_OPTIONS)[keyof typeof TIME_RANGE_OPTIONS];
|
||||
|
||||
export const TIME_RANGE_DAYS: Record<TimeRange, number> = {
|
||||
"5D": 5,
|
||||
"1W": 7,
|
||||
"1M": 30,
|
||||
};
|
||||
|
||||
export const DEFAULT_TIME_RANGE: TimeRange = "5D";
|
||||
|
||||
export const getDateFromForTimeRange = (timeRange: TimeRange): string => {
|
||||
const days = TIME_RANGE_DAYS[timeRange];
|
||||
const date = new Date();
|
||||
date.setDate(date.getDate() - days);
|
||||
return date.toISOString().split("T")[0];
|
||||
};
|
||||
+7
-2
@@ -1,10 +1,11 @@
|
||||
import { getFindingsSeverityTrends } from "@/actions/overview/severity-trends";
|
||||
import { getSeverityTrendsByTimeRange } from "@/actions/overview/severity-trends";
|
||||
import { Card, CardContent, CardHeader, CardTitle } from "@/components/shadcn";
|
||||
|
||||
import { pickFilterParams } from "../_lib/filter-params";
|
||||
import { SSRComponentProps } from "../_types";
|
||||
import { FindingSeverityOverTime } from "./_components/finding-severity-over-time";
|
||||
import { FindingSeverityOverTimeSkeleton } from "./_components/finding-severity-over-time.skeleton";
|
||||
import { DEFAULT_TIME_RANGE } from "./_constants/time-range.constants";
|
||||
|
||||
export { FindingSeverityOverTimeSkeleton };
|
||||
|
||||
@@ -25,7 +26,11 @@ export const FindingSeverityOverTimeSSR = async ({
|
||||
searchParams,
|
||||
}: SSRComponentProps) => {
|
||||
const filters = pickFilterParams(searchParams);
|
||||
const result = await getFindingsSeverityTrends({ filters });
|
||||
|
||||
const result = await getSeverityTrendsByTimeRange({
|
||||
timeRange: DEFAULT_TIME_RANGE,
|
||||
filters,
|
||||
});
|
||||
|
||||
if (result.status === "error") {
|
||||
return <EmptyState message="Failed to load severity trends data" />;
|
||||
|
||||
@@ -32,11 +32,11 @@ export function AlertPill({
|
||||
>
|
||||
<AlertTriangle
|
||||
size={iconSize}
|
||||
style={{ color: "var(--color-text-text-error)" }}
|
||||
style={{ color: "var(--color-text-error-primary)" }}
|
||||
/>
|
||||
<span
|
||||
className={cn(textSizeClass, "font-semibold")}
|
||||
style={{ color: "var(--color-text-text-error)" }}
|
||||
style={{ color: "var(--color-text-error-primary)" }}
|
||||
>
|
||||
{value}
|
||||
</span>
|
||||
|
||||
Reference in New Issue
Block a user