Compare commits
154 Commits
| SHA1 |
|---|
| 2a3c71c435 |
| 07a995a210 |
| 73bf93a036 |
| d53e76e30a |
| e2344b4fc6 |
| eb1fc2b269 |
| 4931564648 |
| 433853493b |
| 5aa112d438 |
| 1b2c73d2e3 |
| 90e3fabc33 |
| d4b90abd10 |
| 251fc6d4e3 |
| dd85da703e |
| b549c8dbad |
| 79ac7cf6d4 |
| d292c6e58a |
| 8f361e7e8d |
| 3eb278cb9f |
| 2f7eec8bca |
| 00063c57de |
| 2341b5bc7d |
| 4015beff20 |
| ab475bafc3 |
| b4ce01afd4 |
| 2b4b23c719 |
| 4398b00801 |
| e0cf8bffd4 |
| 6761f0ffd0 |
| 51bbaeb403 |
| 6158c16108 |
| 0c2c5ea265 |
| 3b56166c34 |
| b5151a8ee5 |
| 0495267351 |
| eefe045c18 |
| d7d1b22c45 |
| 439dbe679b |
| 0e9ba4b116 |
| 89295f7e7d |
| 7cf7758851 |
| 06142094cd |
| 93f1c02f44 |
| e2f30e0987 |
| c80710adfc |
| 1410fe2ff1 |
| 284910d402 |
| 04f795bd49 |
| 8b5e00163e |
| 57d7f77c81 |
| 16b1052ff1 |
| 978e2c82af |
| 0c3ba0b737 |
| 4addfcc848 |
| 8588cc03f4 |
| 7507fea24b |
| 18f0fc693e |
| 606f505ba3 |
| bfce602859 |
| ba45b86a82 |
| d786bb4440 |
| 9424289416 |
| 3cbb6175a5 |
| 438deef3f8 |
| 1cdf4e65b2 |
| dbdd02ebd1 |
| d264f3daff |
| 01fe379b55 |
| 50286846e0 |
| 20ed8b3d2d |
| 45cc6e8b85 |
| 962c64eae5 |
| 7b56f0640f |
| 49c75cc418 |
| 56bca7c104 |
| faaa172b86 |
| 219ce0ba89 |
| 2170e5fe12 |
| e9efb12aa8 |
| 74d72dd56b |
| 06d1d214fd |
| 902bc9ad57 |
| 3616c0a8c0 |
| 7288585fec |
| 6400dc1059 |
| 379c1dc7dd |
| eb247360c3 |
| 7f12832808 |
| 9c387d5742 |
| 4a5801c519 |
| 85cb39af28 |
| c7abd77a1c |
| a622b9d965 |
| 8bd95a04ce |
| 340454ba68 |
| 6dff4bfd8b |
| 22c88e66a1 |
| 3b711f6143 |
| dbdce98cf2 |
| 53404dfa62 |
| c8872dd6ac |
| 26fd7d3adc |
| cb84bd0f94 |
| cb3f3ab35d |
| f58c1fddfb |
| c1bb51cf1a |
| a4e12a94f9 |
| 7b1915e489 |
| 56d092c87e |
| 29a1034658 |
| f5c2146d19 |
| 069f0d106c |
| 803ada7b16 |
| 5e033321e8 |
| 175d7f95f5 |
| 07e82bde56 |
| 4661e01c26 |
| dda0a2567d |
| 56ea498cca |
| f9e1e29631 |
| 3dadb264cc |
| 495aee015e |
| d3a000cbc4 |
| b2abdbeb60 |
| dc852b4595 |
| 1250f582a5 |
| bb43e924ee |
| 0225627a98 |
| 3097513525 |
| 6af9ff4b4b |
| 06fa57a949 |
| dc9e91ac4e |
| 59f8dfe5ae |
| 7e0c5540bb |
| 79ec53bfc5 |
| ed5f6b3af6 |
| 6e135abaa0 |
| 65b054f798 |
| 28d5b2bb6c |
| c8d9f37e70 |
| 9d7b9c3327 |
| 127b8d8e56 |
| 4e9dd46a5e |
| 880345bebe |
| 1259713fd6 |
| 26088868a2 |
| e58574e2a4 |
| a07e599cfc |
| e020b3f74b |
| 8e7e376e4f |
| a63a3d3f68 |
| 10838de636 |
| 5ebf455e04 |
| 0d59441c5f |
```diff
@@ -0,0 +1,20 @@
+# Gentleman Guardian Angel (gga) Configuration
+# https://github.com/Gentleman-Programming/gentleman-guardian-angel
+
+# AI Provider (required)
+# Options: claude, gemini, codex, ollama:<model>
+PROVIDER="claude"
+
+# File patterns to include in review (comma-separated globs)
+# Review both TypeScript (UI) and Python (SDK, API, MCP) files
+FILE_PATTERNS="*.ts,*.tsx,*.js,*.jsx,*.py"
+
+# File patterns to exclude from review (comma-separated globs)
+# Excludes: test files, type definitions, and api/ folder (no AGENTS.md yet)
+EXCLUDE_PATTERNS="*.test.ts,*.test.tsx,*.spec.ts,*.spec.tsx,*.d.ts,*_test.py,test_*.py,conftest.py,api/*"
+
+# File containing your coding standards (relative to repo root)
+RULES_FILE="AGENTS-CODE-REVIEW.md"
+
+# Strict mode: fail if AI response is ambiguous (recommended)
+STRICT_MODE="true"
```
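The new configuration file above drives the AI code review: `FILE_PATTERNS` and `EXCLUDE_PATTERNS` are plain comma-separated glob lists. As a minimal sketch of how such a filter can behave — assuming `fnmatch`-style matching against both the full path and the basename; gga's actual matching rules are not shown in this diff:

```python
from fnmatch import fnmatch

# Hypothetical re-implementation of the include/exclude semantics above.
FILE_PATTERNS = "*.ts,*.tsx,*.js,*.jsx,*.py".split(",")
EXCLUDE_PATTERNS = (
    "*.test.ts,*.test.tsx,*.spec.ts,*.spec.tsx,*.d.ts,"
    "*_test.py,test_*.py,conftest.py,api/*"
).split(",")

def should_review(path: str) -> bool:
    """Return True if `path` matches an include glob and no exclude glob."""
    # Check excludes against the full path (for patterns like "api/*")
    # and the basename (for patterns like "*.test.ts").
    name = path.rsplit("/", 1)[-1]
    if any(fnmatch(path, p) or fnmatch(name, p) for p in EXCLUDE_PATTERNS):
        return False
    return any(fnmatch(name, p) for p in FILE_PATTERNS)

print(should_review("ui/components/button.tsx"))       # True
print(should_review("ui/components/button.test.tsx"))  # False (test file)
print(should_review("api/src/backend/views.py"))       # False ("api/*" excluded)
```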
```diff
@@ -87,7 +87,7 @@ runs:
       uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
       if: always()
       with:
-        name: trivy-scan-report-${{ inputs.image-name }}
+        name: trivy-scan-report-${{ inputs.image-name }}-${{ inputs.image-tag }}
         path: trivy-report.json
         retention-days: ${{ inputs.artifact-retention-days }}
```
```diff
@@ -1,5 +1,6 @@
 {
   "channel": "${{ env.SLACK_CHANNEL_ID }}",
+  "ts": "${{ env.MESSAGE_TS }}",
   "attachments": [
     {
       "color": "${{ env.STATUS_COLOR }}",
```
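Adding a message timestamp (`ts`) to the completed-notification payload is what lets the workflow update its earlier "release started" Slack message in place instead of posting a second one. A minimal sketch of that post-then-update flow, assuming the `slack_sdk` package and placeholder channel/token values (the repository's composite slack-notification action itself is not shown in this diff):

```python
# Illustrative only: sketches the Slack Web API flow the added "ts" field enables.
from slack_sdk import WebClient

client = WebClient(token="xoxb-...")  # placeholder bot token

# 1. The "release started" post returns a message timestamp (ts).
started = client.chat_postMessage(channel="C0123456789", text="API release started")
message_ts = started["ts"]

# 2. Supplying that ts later edits the same message in place,
#    rather than creating a new one in the channel.
client.chat_update(channel="C0123456789", ts=message_ts, text="API release completed")
```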
```diff
@@ -7,10 +7,16 @@ on:
     paths:
       - 'api/**'
       - 'prowler/**'
-      - '.github/workflows/api-build-lint-push-containers.yml'
+      - '.github/workflows/api-container-build-push.yml'
   release:
     types:
       - 'published'
+  workflow_dispatch:
+    inputs:
+      release_tag:
+        description: 'Release tag (e.g., 5.14.0)'
+        required: true
+        type: string

 permissions:
   contents: read
@@ -22,7 +28,7 @@ concurrency:
 env:
   # Tags
   LATEST_TAG: latest
-  RELEASE_TAG: ${{ github.event.release.tag_name }}
+  RELEASE_TAG: ${{ github.event.release.tag_name || inputs.release_tag }}
   STABLE_TAG: stable
   WORKING_DIRECTORY: ./api

@@ -42,8 +48,34 @@ jobs:
       id: set-short-sha
       run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT

-  container-build-push:
+  notify-release-started:
+    if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
     needs: setup
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    outputs:
+      message-ts: ${{ steps.slack-notification.outputs.ts }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+
+      - name: Notify container push started
+        id: slack-notification
+        uses: ./.github/actions/slack-notification
+        env:
+          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
+          COMPONENT: API
+          RELEASE_TAG: ${{ env.RELEASE_TAG }}
+          GITHUB_SERVER_URL: ${{ github.server_url }}
+          GITHUB_REPOSITORY: ${{ github.repository }}
+          GITHUB_RUN_ID: ${{ github.run_id }}
+        with:
+          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
+          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
+
+  container-build-push:
+    needs: [setup, notify-release-started]
+    if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
     runs-on: ${{ matrix.runner }}
     strategy:
       matrix:
@@ -73,7 +105,8 @@ jobs:
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

       - name: Build and push API container for ${{ matrix.arch }}
-        if: github.event_name == 'push' || github.event_name == 'release'
+        id: container-push
+        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
         uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
         with:
           context: ${{ env.WORKING_DIRECTORY }}
@@ -84,39 +117,10 @@ jobs:
           cache-from: type=gha,scope=${{ matrix.arch }}
           cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

-      - name: Notify container push started
-        if: github.event_name == 'release'
-        uses: ./.github/actions/slack-notification
-        env:
-          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
-          COMPONENT: API
-          RELEASE_TAG: ${{ env.RELEASE_TAG }}
-          GITHUB_SERVER_URL: ${{ github.server_url }}
-          GITHUB_REPOSITORY: ${{ github.repository }}
-          GITHUB_RUN_ID: ${{ github.run_id }}
-        with:
-          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
-          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
-
-      - name: Notify container push completed
-        if: github.event_name == 'release' && always()
-        uses: ./.github/actions/slack-notification
-        env:
-          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
-          COMPONENT: API
-          RELEASE_TAG: ${{ env.RELEASE_TAG }}
-          GITHUB_SERVER_URL: ${{ github.server_url }}
-          GITHUB_REPOSITORY: ${{ github.repository }}
-          GITHUB_RUN_ID: ${{ github.run_id }}
-        with:
-          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
-          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
-          step-outcome: ${{ steps.container-push.outcome }}
-
   # Create and push multi-architecture manifest
   create-manifest:
     needs: [setup, container-build-push]
-    if: github.event_name == 'push' || github.event_name == 'release'
+    if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
     runs-on: ubuntu-latest

     steps:
@@ -139,7 +143,7 @@ jobs:
             ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

       - name: Create and push manifests for release event
-        if: github.event_name == 'release'
+        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
         run: |
           docker buildx imagetools create \
             -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }} \
@@ -159,6 +163,40 @@ jobs:
           regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
           echo "Cleanup completed"

+  notify-release-completed:
+    if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    needs: [setup, notify-release-started, container-build-push, create-manifest]
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+
+      - name: Determine overall outcome
+        id: outcome
+        run: |
+          if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
+            echo "outcome=success" >> $GITHUB_OUTPUT
+          else
+            echo "outcome=failure" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Notify container push completed
+        uses: ./.github/actions/slack-notification
+        env:
+          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
+          MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
+          COMPONENT: API
+          RELEASE_TAG: ${{ env.RELEASE_TAG }}
+          GITHUB_SERVER_URL: ${{ github.server_url }}
+          GITHUB_REPOSITORY: ${{ github.repository }}
+          GITHUB_RUN_ID: ${{ github.run_id }}
+        with:
+          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
+          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
+          step-outcome: ${{ steps.outcome.outputs.outcome }}
+          update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
+
   trigger-deployment:
     if: github.event_name == 'push'
     needs: [setup, container-build-push]
```
```diff
@@ -20,6 +20,7 @@ env:

 jobs:
   api-dockerfile-lint:
+    if: github.repository == 'prowler-cloud/prowler'
     runs-on: ubuntu-latest
     timeout-minutes: 15
     permissions:
@@ -43,7 +44,17 @@ jobs:
         ignore: DL3013

   api-container-build-and-scan:
-    runs-on: ubuntu-latest
+    if: github.repository == 'prowler-cloud/prowler'
+    runs-on: ${{ matrix.runner }}
+    strategy:
+      matrix:
+        include:
+          - platform: linux/amd64
+            runner: ubuntu-latest
+            arch: amd64
+          - platform: linux/arm64
+            runner: ubuntu-24.04-arm
+            arch: arm64
     timeout-minutes: 30
     permissions:
       contents: read
@@ -68,22 +79,23 @@ jobs:
         if: steps.check-changes.outputs.any_changed == 'true'
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

-      - name: Build container
+      - name: Build container for ${{ matrix.arch }}
         if: steps.check-changes.outputs.any_changed == 'true'
         uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
         with:
           context: ${{ env.API_WORKING_DIR }}
           push: false
           load: true
-          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          platforms: ${{ matrix.platform }}
+          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}-${{ matrix.arch }}
+          cache-from: type=gha,scope=${{ matrix.arch }}
+          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

-      - name: Scan container with Trivy
-        if: github.repository == 'prowler-cloud/prowler' && steps.check-changes.outputs.any_changed == 'true'
+      - name: Scan container with Trivy for ${{ matrix.arch }}
+        if: steps.check-changes.outputs.any_changed == 'true'
         uses: ./.github/actions/trivy-scan
         with:
           image-name: ${{ env.IMAGE_NAME }}
-          image-tag: ${{ github.sha }}
+          image-tag: ${{ github.sha }}-${{ matrix.arch }}
           fail-on-critical: 'false'
           severity: 'CRITICAL'
```
```diff
@@ -10,6 +10,12 @@ on:
   release:
     types:
       - 'published'
+  workflow_dispatch:
+    inputs:
+      release_tag:
+        description: 'Release tag (e.g., 5.14.0)'
+        required: true
+        type: string

 permissions:
   contents: read
@@ -21,7 +27,7 @@ concurrency:
 env:
   # Tags
   LATEST_TAG: latest
-  RELEASE_TAG: ${{ github.event.release.tag_name }}
+  RELEASE_TAG: ${{ github.event.release.tag_name || inputs.release_tag }}
   STABLE_TAG: stable
   WORKING_DIRECTORY: ./mcp_server

@@ -41,9 +47,35 @@ jobs:
       id: set-short-sha
       run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT

-  container-build-push:
+  notify-release-started:
+    if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
     needs: setup
-    runs-on: ${{ matrix.runner }}
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    outputs:
+      message-ts: ${{ steps.slack-notification.outputs.ts }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+
+      - name: Notify container push started
+        id: slack-notification
+        uses: ./.github/actions/slack-notification
+        env:
+          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
+          COMPONENT: MCP
+          RELEASE_TAG: ${{ env.RELEASE_TAG }}
+          GITHUB_SERVER_URL: ${{ github.server_url }}
+          GITHUB_REPOSITORY: ${{ github.repository }}
+          GITHUB_RUN_ID: ${{ github.run_id }}
+        with:
+          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
+          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
+
+  container-build-push:
+    needs: [setup, notify-release-started]
+    if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
+    runs-on: ${{ matrix.runner }}
     strategy:
       matrix:
         include:
@@ -71,7 +103,8 @@ jobs:
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

       - name: Build and push MCP container for ${{ matrix.arch }}
-        if: github.event_name == 'push' || github.event_name == 'release'
+        id: container-push
+        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
         uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
         with:
           context: ${{ env.WORKING_DIRECTORY }}
@@ -90,39 +123,10 @@ jobs:
           cache-from: type=gha,scope=${{ matrix.arch }}
           cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

-      - name: Notify container push started
-        if: github.event_name == 'release'
-        uses: ./.github/actions/slack-notification
-        env:
-          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
-          COMPONENT: MCP
-          RELEASE_TAG: ${{ env.RELEASE_TAG }}
-          GITHUB_SERVER_URL: ${{ github.server_url }}
-          GITHUB_REPOSITORY: ${{ github.repository }}
-          GITHUB_RUN_ID: ${{ github.run_id }}
-        with:
-          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
-          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
-
-      - name: Notify container push completed
-        if: github.event_name == 'release' && always()
-        uses: ./.github/actions/slack-notification
-        env:
-          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
-          COMPONENT: MCP
-          RELEASE_TAG: ${{ env.RELEASE_TAG }}
-          GITHUB_SERVER_URL: ${{ github.server_url }}
-          GITHUB_REPOSITORY: ${{ github.repository }}
-          GITHUB_RUN_ID: ${{ github.run_id }}
-        with:
-          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
-          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
-          step-outcome: ${{ steps.container-push.outcome }}
-
   # Create and push multi-architecture manifest
   create-manifest:
     needs: [setup, container-build-push]
-    if: github.event_name == 'push' || github.event_name == 'release'
+    if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
     runs-on: ubuntu-latest

     steps:
@@ -145,14 +149,14 @@ jobs:
             ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

       - name: Create and push manifests for release event
-        if: github.event_name == 'release'
+        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
         run: |
           docker buildx imagetools create \
             -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

       - name: Install regctl
         if: always()
         uses: regclient/actions/regctl-installer@main
@@ -165,6 +169,40 @@ jobs:
           regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
           echo "Cleanup completed"

+  notify-release-completed:
+    if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    needs: [setup, notify-release-started, container-build-push, create-manifest]
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+
+      - name: Determine overall outcome
+        id: outcome
+        run: |
+          if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
+            echo "outcome=success" >> $GITHUB_OUTPUT
+          else
+            echo "outcome=failure" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Notify container push completed
+        uses: ./.github/actions/slack-notification
+        env:
+          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
+          MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
+          COMPONENT: MCP
+          RELEASE_TAG: ${{ env.RELEASE_TAG }}
+          GITHUB_SERVER_URL: ${{ github.server_url }}
+          GITHUB_REPOSITORY: ${{ github.repository }}
+          GITHUB_RUN_ID: ${{ github.run_id }}
+        with:
+          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
+          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
+          step-outcome: ${{ steps.outcome.outputs.outcome }}
+          update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
+
   trigger-deployment:
     if: github.event_name == 'push'
     needs: [setup, container-build-push]
```
```diff
@@ -20,6 +20,7 @@ env:

 jobs:
   mcp-dockerfile-lint:
+    if: github.repository == 'prowler-cloud/prowler'
     runs-on: ubuntu-latest
     timeout-minutes: 15
     permissions:
@@ -42,7 +43,17 @@ jobs:
         dockerfile: mcp_server/Dockerfile

   mcp-container-build-and-scan:
-    runs-on: ubuntu-latest
+    if: github.repository == 'prowler-cloud/prowler'
+    runs-on: ${{ matrix.runner }}
+    strategy:
+      matrix:
+        include:
+          - platform: linux/amd64
+            runner: ubuntu-latest
+            arch: amd64
+          - platform: linux/arm64
+            runner: ubuntu-24.04-arm
+            arch: arm64
     timeout-minutes: 30
     permissions:
       contents: read
@@ -66,22 +77,23 @@ jobs:
         if: steps.check-changes.outputs.any_changed == 'true'
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

-      - name: Build MCP container
+      - name: Build MCP container for ${{ matrix.arch }}
         if: steps.check-changes.outputs.any_changed == 'true'
         uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
         with:
           context: ${{ env.MCP_WORKING_DIR }}
           push: false
           load: true
-          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          platforms: ${{ matrix.platform }}
+          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}-${{ matrix.arch }}
+          cache-from: type=gha,scope=${{ matrix.arch }}
+          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

-      - name: Scan MCP container with Trivy
-        if: github.repository == 'prowler-cloud/prowler' && steps.check-changes.outputs.any_changed == 'true'
+      - name: Scan MCP container with Trivy for ${{ matrix.arch }}
+        if: steps.check-changes.outputs.any_changed == 'true'
         uses: ./.github/actions/trivy-scan
         with:
           image-name: ${{ env.IMAGE_NAME }}
-          image-tag: ${{ github.sha }}
+          image-tag: ${{ github.sha }}-${{ matrix.arch }}
           fail-on-critical: 'false'
           severity: 'CRITICAL'
```
```diff
@@ -88,59 +88,56 @@ jobs:

       - name: Read changelog versions from release branch
         run: |
-          # Function to extract the latest version from changelog
-          extract_latest_version() {
+          # Function to extract the version for a specific Prowler release from changelog
+          # This looks for entries with "(Prowler X.Y.Z)" to find the released version
+          extract_version_for_release() {
             local changelog_file="$1"
+            local prowler_version="$2"
             if [ -f "$changelog_file" ]; then
-              # Extract the first version entry (most recent) from changelog
-              # Format: ## [version] (1.2.3) or ## [vversion] (v1.2.3)
-              local version=$(grep -m 1 '^## \[' "$changelog_file" | sed 's/^## \[\(.*\)\].*/\1/' | sed 's/^v//' | tr -d '[:space:]')
+              # Extract version that matches this Prowler release
+              # Format: ## [version] (Prowler X.Y.Z) or ## [vversion] (Prowler vX.Y.Z)
+              local version=$(grep '^## \[' "$changelog_file" | grep "(Prowler v\?${prowler_version})" | head -1 | sed 's/^## \[\(.*\)\].*/\1/' | sed 's/^v//' | tr -d '[:space:]')
               echo "$version"
             else
               echo ""
             fi
           }

-          # Read actual versions from changelogs (source of truth)
-          UI_VERSION=$(extract_latest_version "ui/CHANGELOG.md")
-          API_VERSION=$(extract_latest_version "api/CHANGELOG.md")
-          SDK_VERSION=$(extract_latest_version "prowler/CHANGELOG.md")
-          MCP_VERSION=$(extract_latest_version "mcp_server/CHANGELOG.md")
+          # Read versions from changelogs for this specific Prowler release
+          SDK_VERSION=$(extract_version_for_release "prowler/CHANGELOG.md" "$PROWLER_VERSION")
+          API_VERSION=$(extract_version_for_release "api/CHANGELOG.md" "$PROWLER_VERSION")
+          UI_VERSION=$(extract_version_for_release "ui/CHANGELOG.md" "$PROWLER_VERSION")
+          MCP_VERSION=$(extract_version_for_release "mcp_server/CHANGELOG.md" "$PROWLER_VERSION")

-          echo "UI_VERSION=${UI_VERSION}" >> "${GITHUB_ENV}"
-          echo "API_VERSION=${API_VERSION}" >> "${GITHUB_ENV}"
           echo "SDK_VERSION=${SDK_VERSION}" >> "${GITHUB_ENV}"
+          echo "API_VERSION=${API_VERSION}" >> "${GITHUB_ENV}"
+          echo "UI_VERSION=${UI_VERSION}" >> "${GITHUB_ENV}"
           echo "MCP_VERSION=${MCP_VERSION}" >> "${GITHUB_ENV}"

-          if [ -n "$UI_VERSION" ]; then
-            echo "Read UI version from changelog: $UI_VERSION"
+          if [ -n "$SDK_VERSION" ]; then
+            echo "✓ SDK version for Prowler $PROWLER_VERSION: $SDK_VERSION"
           else
-            echo "Warning: No UI version found in ui/CHANGELOG.md"
+            echo "ℹ No SDK version found for Prowler $PROWLER_VERSION in prowler/CHANGELOG.md"
           fi

           if [ -n "$API_VERSION" ]; then
-            echo "Read API version from changelog: $API_VERSION"
+            echo "✓ API version for Prowler $PROWLER_VERSION: $API_VERSION"
           else
-            echo "Warning: No API version found in api/CHANGELOG.md"
+            echo "ℹ No API version found for Prowler $PROWLER_VERSION in api/CHANGELOG.md"
           fi

-          if [ -n "$SDK_VERSION" ]; then
-            echo "Read SDK version from changelog: $SDK_VERSION"
+          if [ -n "$UI_VERSION" ]; then
+            echo "✓ UI version for Prowler $PROWLER_VERSION: $UI_VERSION"
           else
-            echo "Warning: No SDK version found in prowler/CHANGELOG.md"
+            echo "ℹ No UI version found for Prowler $PROWLER_VERSION in ui/CHANGELOG.md"
           fi

           if [ -n "$MCP_VERSION" ]; then
-            echo "Read MCP version from changelog: $MCP_VERSION"
+            echo "✓ MCP version for Prowler $PROWLER_VERSION: $MCP_VERSION"
           else
-            echo "Warning: No MCP version found in mcp_server/CHANGELOG.md"
+            echo "ℹ No MCP version found for Prowler $PROWLER_VERSION in mcp_server/CHANGELOG.md"
           fi
-
-          echo "UI version: $UI_VERSION"
-          echo "API version: $API_VERSION"
-          echo "SDK version: $SDK_VERSION"
-          echo "MCP version: $MCP_VERSION"

       - name: Extract and combine changelog entries
         run: |
           set -e
@@ -166,70 +163,54 @@ jobs:

           # Remove --- separators
           sed -i '/^---$/d' "$output_file"

           # Remove only trailing empty lines (not all empty lines)
           sed -i -e :a -e '/^\s*$/d;N;ba' "$output_file"
         }

-          # Calculate expected versions for this release
-          if [[ $PROWLER_VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
-            EXPECTED_UI_VERSION="1.${BASH_REMATCH[2]}.${BASH_REMATCH[3]}"
-            EXPECTED_API_VERSION="1.$((${BASH_REMATCH[2]} + 1)).${BASH_REMATCH[3]}"
-
-            echo "Expected UI version for this release: $EXPECTED_UI_VERSION"
-            echo "Expected API version for this release: $EXPECTED_API_VERSION"
-          fi
-
-          # Determine if components have changes for this specific release
-          # UI has changes if its current version matches what we expect for this release
-          if [ -n "$UI_VERSION" ] && [ "$UI_VERSION" = "$EXPECTED_UI_VERSION" ]; then
-            echo "HAS_UI_CHANGES=true" >> $GITHUB_ENV
-            echo "✓ UI changes detected - version matches expected: $UI_VERSION"
-            extract_changelog "ui/CHANGELOG.md" "$UI_VERSION" "ui_changelog.md"
-          else
-            echo "HAS_UI_CHANGES=false" >> $GITHUB_ENV
-            echo "ℹ No UI changes for this release (current: $UI_VERSION, expected: $EXPECTED_UI_VERSION)"
-            touch "ui_changelog.md"
-          fi
-
-          # API has changes if its current version matches what we expect for this release
-          if [ -n "$API_VERSION" ] && [ "$API_VERSION" = "$EXPECTED_API_VERSION" ]; then
-            echo "HAS_API_CHANGES=true" >> $GITHUB_ENV
-            echo "✓ API changes detected - version matches expected: $API_VERSION"
-            extract_changelog "api/CHANGELOG.md" "$API_VERSION" "api_changelog.md"
-          else
-            echo "HAS_API_CHANGES=false" >> $GITHUB_ENV
-            echo "ℹ No API changes for this release (current: $API_VERSION, expected: $EXPECTED_API_VERSION)"
-            touch "api_changelog.md"
-          fi
-
-          # SDK has changes if its current version matches the input version
-          if [ -n "$SDK_VERSION" ] && [ "$SDK_VERSION" = "$PROWLER_VERSION" ]; then
+          if [ -n "$SDK_VERSION" ]; then
             echo "HAS_SDK_CHANGES=true" >> $GITHUB_ENV
-            echo "✓ SDK changes detected - version matches input: $SDK_VERSION"
-            extract_changelog "prowler/CHANGELOG.md" "$PROWLER_VERSION" "prowler_changelog.md"
+            HAS_SDK_CHANGES="true"
+            echo "✓ SDK changes detected - version: $SDK_VERSION"
+            extract_changelog "prowler/CHANGELOG.md" "$SDK_VERSION" "prowler_changelog.md"
           else
             echo "HAS_SDK_CHANGES=false" >> $GITHUB_ENV
-            echo "ℹ No SDK changes for this release (current: $SDK_VERSION, input: $PROWLER_VERSION)"
+            HAS_SDK_CHANGES="false"
+            echo "ℹ No SDK changes for this release"
             touch "prowler_changelog.md"
           fi

-          # MCP has changes if the changelog references this Prowler version
-          # Check if the changelog contains "(Prowler X.Y.Z)" or "(Prowler UNRELEASED)"
-          if [ -f "mcp_server/CHANGELOG.md" ]; then
-            MCP_PROWLER_REF=$(grep -m 1 "^## \[.*\] (Prowler" mcp_server/CHANGELOG.md | sed -E 's/.*\(Prowler ([^)]+)\).*/\1/' | tr -d '[:space:]')
-            if [ "$MCP_PROWLER_REF" = "$PROWLER_VERSION" ] || [ "$MCP_PROWLER_REF" = "UNRELEASED" ]; then
-              echo "HAS_MCP_CHANGES=true" >> $GITHUB_ENV
-              echo "✓ MCP changes detected - Prowler reference: $MCP_PROWLER_REF (version: $MCP_VERSION)"
-              extract_changelog "mcp_server/CHANGELOG.md" "$MCP_VERSION" "mcp_changelog.md"
-            else
-              echo "HAS_MCP_CHANGES=false" >> $GITHUB_ENV
-              echo "ℹ No MCP changes for this release (Prowler reference: $MCP_PROWLER_REF, input: $PROWLER_VERSION)"
-              touch "mcp_changelog.md"
-            fi
+          if [ -n "$API_VERSION" ]; then
+            echo "HAS_API_CHANGES=true" >> $GITHUB_ENV
+            HAS_API_CHANGES="true"
+            echo "✓ API changes detected - version: $API_VERSION"
+            extract_changelog "api/CHANGELOG.md" "$API_VERSION" "api_changelog.md"
+          else
+            echo "HAS_API_CHANGES=false" >> $GITHUB_ENV
+            HAS_API_CHANGES="false"
+            echo "ℹ No API changes for this release"
+            touch "api_changelog.md"
+          fi
+
+          if [ -n "$UI_VERSION" ]; then
+            echo "HAS_UI_CHANGES=true" >> $GITHUB_ENV
+            HAS_UI_CHANGES="true"
+            echo "✓ UI changes detected - version: $UI_VERSION"
+            extract_changelog "ui/CHANGELOG.md" "$UI_VERSION" "ui_changelog.md"
+          else
+            echo "HAS_UI_CHANGES=false" >> $GITHUB_ENV
+            HAS_UI_CHANGES="false"
+            echo "ℹ No UI changes for this release"
+            touch "ui_changelog.md"
+          fi

+          if [ -n "$MCP_VERSION" ]; then
+            echo "HAS_MCP_CHANGES=true" >> $GITHUB_ENV
+            HAS_MCP_CHANGES="true"
+            echo "✓ MCP changes detected - version: $MCP_VERSION"
+            extract_changelog "mcp_server/CHANGELOG.md" "$MCP_VERSION" "mcp_changelog.md"
           else
             echo "HAS_MCP_CHANGES=false" >> $GITHUB_ENV
-            echo "ℹ No MCP changelog found"
+            HAS_MCP_CHANGES="false"
+            echo "ℹ No MCP changes for this release"
             touch "mcp_changelog.md"
           fi

@@ -325,6 +306,17 @@ jobs:
           fi
           echo "✓ api/src/backend/api/v1/views.py version: $CURRENT_API_VERSION"

+      - name: Verify API version in api/src/backend/api/specs/v1.yaml
+        if: ${{ env.HAS_API_CHANGES == 'true' }}
+        run: |
+          CURRENT_API_VERSION=$(grep '^  version: ' api/src/backend/api/specs/v1.yaml | sed -E 's/  version: ([0-9]+\.[0-9]+\.[0-9]+)/\1/' | tr -d '[:space:]')
+          API_VERSION_TRIMMED=$(echo "$API_VERSION" | tr -d '[:space:]')
+          if [ "$CURRENT_API_VERSION" != "$API_VERSION_TRIMMED" ]; then
+            echo "ERROR: API version mismatch in api/src/backend/api/specs/v1.yaml (expected: '$API_VERSION_TRIMMED', found: '$CURRENT_API_VERSION')"
+            exit 1
+          fi
+          echo "✓ api/src/backend/api/specs/v1.yaml version: $CURRENT_API_VERSION"
+
       - name: Update API prowler dependency for minor release
         if: ${{ env.PATCH_VERSION == '0' }}
         run: |
```
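The reworked `extract_version_for_release` above pins each component's version to a specific Prowler release by matching the `(Prowler X.Y.Z)` suffix in changelog headings, instead of blindly taking the first entry. A rough Python equivalent of that grep/sed pipeline (a hypothetical helper for illustration, not part of the workflow):

```python
import re

# Hypothetical Python mirror of extract_version_for_release: find the heading
# whose "(Prowler X.Y.Z)" suffix matches the release being cut, and return
# the component's own version from the bracketed part of that heading.
def extract_version_for_release(changelog_text: str, prowler_version: str) -> str:
    # Headings look like: "## [1.14.0] (Prowler 5.14.0)" or "## [v1.14.0] (Prowler v5.14.0)"
    pattern = re.compile(
        r"^## \[(?P<version>[^\]]+)\] \(Prowler v?" + re.escape(prowler_version) + r"\)",
        re.MULTILINE,
    )
    match = pattern.search(changelog_text)
    return match.group("version").lstrip("v").strip() if match else ""

changelog = """## [1.15.0] (Prowler 5.15.0)
- newer entry
## [1.14.0] (Prowler 5.14.0)
- the entry we want
"""
print(extract_version_for_release(changelog, "5.14.0"))  # -> 1.14.0
```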
```diff
@@ -16,6 +16,12 @@ on:
   release:
     types:
       - 'published'
+  workflow_dispatch:
+    inputs:
+      release_tag:
+        description: 'Release tag (e.g., 5.14.0)'
+        required: true
+        type: string

 permissions:
   contents: read
@@ -44,30 +50,15 @@ env:
   AWS_REGION: us-east-1

 jobs:
-  container-build-push:
+  setup:
     if: github.repository == 'prowler-cloud/prowler'
-    runs-on: ${{ matrix.runner }}
-    strategy:
-      matrix:
-        include:
-          - platform: linux/amd64
-            runner: ubuntu-latest
-            arch: amd64
-          - platform: linux/arm64
-            runner: ubuntu-24.04-arm
-            arch: arm64
-    timeout-minutes: 45
-    permissions:
-      contents: read
-      packages: write
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    outputs:
+      prowler_version: ${{ steps.get-prowler-version.outputs.prowler_version }}
+      prowler_version_major: ${{ steps.get-prowler-version.outputs.prowler_version_major }}
+      latest_tag: ${{ steps.get-prowler-version.outputs.latest_tag }}
+      stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
     env:
       POETRY_VIRTUALENVS_CREATE: 'false'

     steps:
       - name: Checkout repository
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
@@ -87,32 +78,24 @@ jobs:
         run: |
           PROWLER_VERSION="$(poetry version -s 2>/dev/null)"
+          echo "prowler_version=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"
           echo "PROWLER_VERSION=${PROWLER_VERSION}" >> "${GITHUB_ENV}"

           # Extract major version
           PROWLER_VERSION_MAJOR="${PROWLER_VERSION%%.*}"
+          echo "prowler_version_major=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_OUTPUT}"
           echo "PROWLER_VERSION_MAJOR=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_ENV}"

           # Set version-specific tags
           case ${PROWLER_VERSION_MAJOR} in
             3)
-              echo "LATEST_TAG=v3-latest" >> "${GITHUB_ENV}"
-              echo "STABLE_TAG=v3-stable" >> "${GITHUB_ENV}"
+              echo "latest_tag=v3-latest" >> "${GITHUB_OUTPUT}"
+              echo "stable_tag=v3-stable" >> "${GITHUB_OUTPUT}"
               echo "✓ Prowler v3 detected - tags: v3-latest, v3-stable"
               ;;
             4)
-              echo "LATEST_TAG=v4-latest" >> "${GITHUB_ENV}"
-              echo "STABLE_TAG=v4-stable" >> "${GITHUB_ENV}"
+              echo "latest_tag=v4-latest" >> "${GITHUB_OUTPUT}"
+              echo "stable_tag=v4-stable" >> "${GITHUB_OUTPUT}"
               echo "✓ Prowler v4 detected - tags: v4-latest, v4-stable"
               ;;
             5)
-              echo "LATEST_TAG=latest" >> "${GITHUB_ENV}"
-              echo "STABLE_TAG=stable" >> "${GITHUB_ENV}"
+              echo "latest_tag=latest" >> "${GITHUB_OUTPUT}"
+              echo "stable_tag=stable" >> "${GITHUB_OUTPUT}"
               echo "✓ Prowler v5 detected - tags: latest, stable"
@@ -123,6 +106,53 @@ jobs:
               ;;
           esac

+  notify-release-started:
+    if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    needs: setup
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    outputs:
+      message-ts: ${{ steps.slack-notification.outputs.ts }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+
+      - name: Notify container push started
+        id: slack-notification
+        uses: ./.github/actions/slack-notification
+        env:
+          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
+          COMPONENT: SDK
+          RELEASE_TAG: ${{ needs.setup.outputs.prowler_version }}
+          GITHUB_SERVER_URL: ${{ github.server_url }}
+          GITHUB_REPOSITORY: ${{ github.repository }}
+          GITHUB_RUN_ID: ${{ github.run_id }}
+        with:
+          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
+          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
+
+  container-build-push:
+    needs: [setup, notify-release-started]
+    if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
+    runs-on: ${{ matrix.runner }}
+    strategy:
+      matrix:
+        include:
+          - platform: linux/amd64
+            runner: ubuntu-latest
+            arch: amd64
+          - platform: linux/arm64
+            runner: ubuntu-24.04-arm
+            arch: arm64
+    timeout-minutes: 45
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+
       - name: Login to DockerHub
         uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
         with:
@@ -142,7 +172,8 @@ jobs:
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

       - name: Build and push SDK container for ${{ matrix.arch }}
-        if: github.event_name == 'push' || github.event_name == 'release'
+        id: container-push
+        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
         uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
         with:
           context: .
@@ -150,43 +181,14 @@ jobs:
           push: true
           platforms: ${{ matrix.platform }}
           tags: |
-            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}-${{ matrix.arch }}
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-${{ matrix.arch }}
           cache-from: type=gha,scope=${{ matrix.arch }}
           cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

-      - name: Notify container push started
-        if: github.event_name == 'release'
-        uses: ./.github/actions/slack-notification
-        env:
-          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
-          COMPONENT: SDK
-          RELEASE_TAG: ${{ env.PROWLER_VERSION }}
-          GITHUB_SERVER_URL: ${{ github.server_url }}
-          GITHUB_REPOSITORY: ${{ github.repository }}
-          GITHUB_RUN_ID: ${{ github.run_id }}
-        with:
-          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
-          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"
-
-      - name: Notify container push completed
-        if: github.event_name == 'release' && always()
-        uses: ./.github/actions/slack-notification
-        env:
-          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
-          COMPONENT: SDK
-          RELEASE_TAG: ${{ env.PROWLER_VERSION }}
-          GITHUB_SERVER_URL: ${{ github.server_url }}
-          GITHUB_REPOSITORY: ${{ github.repository }}
-          GITHUB_RUN_ID: ${{ github.run_id }}
-        with:
-          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
-          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
-          step-outcome: ${{ steps.container-push.outcome }}
-
   # Create and push multi-architecture manifest
   create-manifest:
-    needs: [container-build-push]
-    if: github.event_name == 'push' || github.event_name == 'release'
+    needs: [setup, container-build-push]
+    if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
     runs-on: ubuntu-latest

     steps:
@@ -212,24 +214,24 @@ jobs:
         if: github.event_name == 'push'
         run: |
           docker buildx imagetools create \
-            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
-            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
-            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
-            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
-            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64
+            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
+            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
+            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }} \
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64 \
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64

       - name: Create and push manifests for release event
-        if: github.event_name == 'release'
+        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
         run: |
           docker buildx imagetools create \
-            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
-            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
-            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
-            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
-            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.prowler_version }} \
-            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.stable_tag }} \
-            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
-            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64
+            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.prowler_version }} \
+            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.stable_tag }} \
+            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.prowler_version }} \
+            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.setup.outputs.stable_tag }} \
+            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.prowler_version }} \
+            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.stable_tag }} \
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64 \
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64

       - name: Install regctl
         if: always()
@@ -239,13 +241,47 @@ jobs:
         if: always()
         run: |
           echo "Cleaning up intermediate tags..."
-          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64" || true
-          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64" || true
+          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-amd64" || true
+          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-arm64" || true
           echo "Cleanup completed"

+  notify-release-completed:
+    if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
+    needs: [setup, notify-release-started, container-build-push, create-manifest]
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+
+      - name: Determine overall outcome
+        id: outcome
+        run: |
+          if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
+            echo "outcome=success" >> $GITHUB_OUTPUT
+          else
+            echo "outcome=failure" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Notify container push completed
+        uses: ./.github/actions/slack-notification
+        env:
+          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
+          MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
+          COMPONENT: SDK
+          RELEASE_TAG: ${{ needs.setup.outputs.prowler_version }}
+          GITHUB_SERVER_URL: ${{ github.server_url }}
+          GITHUB_REPOSITORY: ${{ github.repository }}
+          GITHUB_RUN_ID: ${{ github.run_id }}
+        with:
+          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
+          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
+          step-outcome: ${{ steps.outcome.outputs.outcome }}
+          update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
+
   dispatch-v3-deployment:
-    if: needs.container-build-push.outputs.prowler_version_major == '3'
-    needs: container-build-push
+    if: needs.setup.outputs.prowler_version_major == '3'
+    needs: [setup, container-build-push]
     runs-on: ubuntu-latest
     timeout-minutes: 5
     permissions:
@@ -272,4 +308,4 @@ jobs:
           token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
           repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
           event-type: dispatch
-          client-payload: '{"version":"release","tag":"${{ needs.container-build-push.outputs.prowler_version }}"}'
+          client-payload: '{"version":"release","tag":"${{ needs.setup.outputs.prowler_version }}"}'
```
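The `case` statement in the new `setup` job maps the Prowler major version to its `latest`/`stable` Docker tag names (v5 gets the bare tags, older majors get prefixed ones). The same mapping as a compact, hypothetical Python helper:

```python
# Hypothetical helper mirroring the case statement above; not part of the workflow.
def tags_for(prowler_version: str) -> tuple[str, str]:
    """Return (latest_tag, stable_tag) for a Prowler version string."""
    major = prowler_version.split(".", 1)[0]
    if major in ("3", "4"):
        return f"v{major}-latest", f"v{major}-stable"
    if major == "5":
        return "latest", "stable"
    raise ValueError(f"Unsupported Prowler major version: {major}")

print(tags_for("5.14.0"))  # ('latest', 'stable')
print(tags_for("4.6.1"))   # ('v4-latest', 'v4-stable')
```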
```diff
@@ -44,7 +44,16 @@ jobs:

   sdk-container-build-and-scan:
     if: github.repository == 'prowler-cloud/prowler'
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.runner }}
+    strategy:
+      matrix:
+        include:
+          - platform: linux/amd64
+            runner: ubuntu-latest
+            arch: amd64
+          - platform: linux/arm64
+            runner: ubuntu-24.04-arm
+            arch: arm64
     timeout-minutes: 30
     permissions:
       contents: read
@@ -82,22 +91,23 @@ jobs:
         if: steps.check-changes.outputs.any_changed == 'true'
         uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

-      - name: Build SDK container
+      - name: Build SDK container for ${{ matrix.arch }}
         if: steps.check-changes.outputs.any_changed == 'true'
         uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
         with:
           context: .
           push: false
           load: true
-          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          platforms: ${{ matrix.platform }}
+          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}-${{ matrix.arch }}
+          cache-from: type=gha,scope=${{ matrix.arch }}
+          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

-      - name: Scan SDK container with Trivy
+      - name: Scan SDK container with Trivy for ${{ matrix.arch }}
         if: steps.check-changes.outputs.any_changed == 'true'
         uses: ./.github/actions/trivy-scan
         with:
           image-name: ${{ env.IMAGE_NAME }}
-          image-tag: ${{ github.sha }}
+          image-tag: ${{ github.sha }}-${{ matrix.arch }}
           fail-on-critical: 'false'
           severity: 'CRITICAL'
```
```diff
@@ -82,9 +82,110 @@ jobs:
             ./tests/**/aws/**
             ./poetry.lock

+      - name: Resolve AWS services under test
+        if: steps.changed-aws.outputs.any_changed == 'true'
+        id: aws-services
+        shell: bash
+        run: |
+          python3 <<'PY'
+          import os
+          from pathlib import Path
+
+          dependents = {
+              "acm": ["elb"],
+              "autoscaling": ["dynamodb"],
+              "awslambda": ["ec2", "inspector2"],
+              "backup": ["dynamodb", "ec2", "rds"],
+              "cloudfront": ["shield"],
+              "cloudtrail": ["awslambda", "cloudwatch"],
+              "cloudwatch": ["bedrock"],
+              "ec2": ["dlm", "dms", "elbv2", "emr", "inspector2", "rds", "redshift", "route53", "shield", "ssm"],
+              "ecr": ["inspector2"],
+              "elb": ["shield"],
+              "elbv2": ["shield"],
+              "globalaccelerator": ["shield"],
+              "iam": ["bedrock", "cloudtrail", "cloudwatch", "codebuild"],
+              "kafka": ["firehose"],
+              "kinesis": ["firehose"],
+              "kms": ["kafka"],
+              "organizations": ["iam", "servicecatalog"],
+              "route53": ["shield"],
+              "s3": ["bedrock", "cloudfront", "cloudtrail", "macie"],
+              "ssm": ["ec2"],
+              "vpc": ["awslambda", "ec2", "efs", "elasticache", "neptune", "networkfirewall", "rds", "redshift", "workspaces"],
+              "waf": ["elbv2"],
+              "wafv2": ["cognito", "elbv2"],
+          }
+
+          changed_raw = """${{ steps.changed-aws.outputs.all_changed_files }}"""
+          # all_changed_files is space-separated, not newline-separated
+          # Strip leading "./" if present for consistent path handling
+          changed_files = [Path(f.lstrip("./")) for f in changed_raw.split() if f]
+
+          services = set()
+          run_all = False
+
+          for path in changed_files:
+              path_str = path.as_posix()
+              parts = path.parts
+              if path_str.startswith("prowler/providers/aws/services/"):
+                  if len(parts) > 4 and "." not in parts[4]:
+                      services.add(parts[4])
+                  else:
+                      run_all = True
+              elif path_str.startswith("tests/providers/aws/services/"):
+                  if len(parts) > 4 and "." not in parts[4]:
+                      services.add(parts[4])
+                  else:
+                      run_all = True
+              elif path_str.startswith("prowler/providers/aws/") or path_str.startswith("tests/providers/aws/"):
+                  run_all = True
+
+          # Expand with direct dependent services (one level only)
+          # We only test services that directly depend on the changed services,
+          # not transitive dependencies (services that depend on dependents)
+          original_services = set(services)
+          for svc in original_services:
+              for dep in dependents.get(svc, []):
+                  services.add(dep)
+
+          if run_all or not services:
+              run_all = True
+              services = set()
+
+          service_paths = " ".join(sorted(f"tests/providers/aws/services/{svc}" for svc in services))
+
+          output_lines = [
+              f"run_all={'true' if run_all else 'false'}",
+              f"services={' '.join(sorted(services))}",
+              f"service_paths={service_paths}",
+          ]
+
+          with open(os.environ["GITHUB_OUTPUT"], "a") as gh_out:
+              for line in output_lines:
+                  gh_out.write(line + "\n")
+
+          print(f"AWS changed files (filtered): {changed_raw or 'none'}")
+          print(f"Run all AWS tests: {run_all}")
+          if services:
+              print(f"AWS service test paths: {service_paths}")
+          else:
+              print("AWS service test paths: none detected")
+          PY
+
       - name: Run AWS tests
         if: steps.changed-aws.outputs.any_changed == 'true'
-        run: poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
+        run: |
+          echo "AWS run_all=${{ steps.aws-services.outputs.run_all }}"
+          echo "AWS service_paths='${{ steps.aws-services.outputs.service_paths }}'"
+
+          if [ "${{ steps.aws-services.outputs.run_all }}" = "true" ]; then
+            poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
+          elif [ -z "${{ steps.aws-services.outputs.service_paths }}" ]; then
+            echo "No AWS service paths detected; skipping AWS tests."
+          else
+            poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml ${{ steps.aws-services.outputs.service_paths }}
+          fi

       - name: Upload AWS coverage to Codecov
         if: steps.changed-aws.outputs.any_changed == 'true'
```
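As the script's comments note, the resolver expands changed services with their direct dependents only, deliberately skipping transitive dependents. A tiny runnable demonstration of that one-level expansion, using an abbreviated stand-in for the real `dependents` table:

```python
# Abbreviated subset of the dependents mapping, for illustration only.
dependents = {
    "vpc": ["ec2", "rds"],
    "ec2": ["shield"],  # pulled in only if ec2 itself changed
}

changed = {"vpc"}
services = set(changed)
for svc in set(services):  # iterate over a snapshot, as the workflow script does
    services.update(dependents.get(svc, []))

# 'shield' is absent: it depends on ec2 (a dependent), not on the changed vpc.
print(sorted(services))  # ['ec2', 'rds', 'vpc']
```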
@@ -10,6 +10,12 @@ on:
  release:
    types:
      - 'published'
  workflow_dispatch:
    inputs:
      release_tag:
        description: 'Release tag (e.g., 5.14.0)'
        required: true
        type: string

permissions:
  contents: read
@@ -21,7 +27,7 @@ concurrency:
env:
  # Tags
  LATEST_TAG: latest
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  RELEASE_TAG: ${{ github.event.release.tag_name || inputs.release_tag }}
  STABLE_TAG: stable
  WORKING_DIRECTORY: ./ui

@@ -44,9 +50,35 @@ jobs:
    id: set-short-sha
    run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT

  container-build-push:
  notify-release-started:
    if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
    needs: setup
    runs-on: ${{ matrix.runner }}
    runs-on: ubuntu-latest
    timeout-minutes: 5
    outputs:
      message-ts: ${{ steps.slack-notification.outputs.ts }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Notify container push started
        id: slack-notification
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: UI
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

  container-build-push:
    needs: [setup, notify-release-started]
    if: always() && needs.setup.result == 'success' && (needs.notify-release-started.result == 'success' || needs.notify-release-started.result == 'skipped')
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
@@ -75,12 +107,13 @@ jobs:
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push UI container for ${{ matrix.arch }}
        if: github.event_name == 'push'
        id: container-push
        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ needs.setup.outputs.short-sha }}
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && format('v{0}', env.RELEASE_TAG) || needs.setup.outputs.short-sha }}
            NEXT_PUBLIC_API_BASE_URL=${{ env.NEXT_PUBLIC_API_BASE_URL }}
          push: true
          platforms: ${{ matrix.platform }}
@@ -89,53 +122,10 @@ jobs:
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

      - name: Build and push UI container for ${{ matrix.arch }}
        if: github.event_name == 'push' || github.event_name == 'release'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ github.event_name == 'release' && format('v{0}', env.RELEASE_TAG) || needs.setup.outputs.short_sha }}
            NEXT_PUBLIC_API_BASE_URL=${{ env.NEXT_PUBLIC_API_BASE_URL }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: UI
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: UI
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  # Create and push multi-architecture manifest
  create-manifest:
    needs: [setup, container-build-push]
    if: github.event_name == 'push' || github.event_name == 'release'
    if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
    runs-on: ubuntu-latest

    steps:
@@ -158,7 +148,7 @@ jobs:
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

      - name: Create and push manifests for release event
        if: github.event_name == 'release'
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        run: |
          docker buildx imagetools create \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }} \
@@ -178,6 +168,40 @@ jobs:
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
          echo "Cleanup completed"

  notify-release-completed:
    if: always() && needs.notify-release-started.result == 'success' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
    needs: [setup, notify-release-started, container-build-push, create-manifest]
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Determine overall outcome
        id: outcome
        run: |
          if [[ "${{ needs.container-build-push.result }}" == "success" && "${{ needs.create-manifest.result }}" == "success" ]]; then
            echo "outcome=success" >> $GITHUB_OUTPUT
          else
            echo "outcome=failure" >> $GITHUB_OUTPUT
          fi

      - name: Notify container push completed
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          MESSAGE_TS: ${{ needs.notify-release-started.outputs.message-ts }}
          COMPONENT: UI
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.outcome.outputs.outcome }}
          update-ts: ${{ needs.notify-release-started.outputs.message-ts }}
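The started/completed jobs above thread their Slack updates through the message timestamp: the first notification's `ts` output is exposed as `message-ts` and fed back via `update-ts`, so the completion status edits the original message instead of posting a new one. A minimal Python sketch of that pattern using the official `slack_sdk` client (the channel ID and message texts are placeholders, and the workflow itself goes through a custom composite action rather than this client directly):

```python
import os

from slack_sdk import WebClient  # pip install slack_sdk

client = WebClient(token=os.environ["SLACK_BOT_TOKEN"])
channel = os.environ["SLACK_CHANNEL_ID"]  # placeholder channel ID

# Post the "release started" message and keep its timestamp.
started = client.chat_postMessage(channel=channel, text="UI container release started")
message_ts = started["ts"]

# ... build and push the containers ...

# Update the same message in place with the final outcome.
client.chat_update(channel=channel, ts=message_ts, text="UI container release completed: success")
```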
  trigger-deployment:
    if: github.event_name == 'push'
    needs: [setup, container-build-push]
@@ -20,6 +20,7 @@ env:

jobs:
  ui-dockerfile-lint:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 10
    permissions:
@@ -43,7 +44,17 @@ jobs:
          ignore: DL3018

  ui-container-build-and-scan:
    runs-on: ubuntu-latest
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    timeout-minutes: 30
    permissions:
      contents: read
@@ -67,7 +78,7 @@ jobs:
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build UI container
      - name: Build UI container for ${{ matrix.arch }}
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
@@ -75,17 +86,18 @@ jobs:
          target: prod
          push: false
          load: true
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: ${{ matrix.platform }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          build-args: |
            NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=pk_test_51LwpXXXX

      - name: Scan UI container with Trivy
        if: github.repository == 'prowler-cloud/prowler' && steps.check-changes.outputs.any_changed == 'true'
      - name: Scan UI container with Trivy for ${{ matrix.arch }}
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}
          image-tag: ${{ github.sha }}-${{ matrix.arch }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
@@ -10,6 +10,7 @@ on:
      - 'ui/**'

jobs:

  e2e-tests:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
@@ -33,12 +34,50 @@ jobs:
      E2E_M365_SECRET_ID: ${{ secrets.E2E_M365_SECRET_ID }}
      E2E_M365_TENANT_ID: ${{ secrets.E2E_M365_TENANT_ID }}
      E2E_M365_CERTIFICATE_CONTENT: ${{ secrets.E2E_M365_CERTIFICATE_CONTENT }}
      E2E_NEW_PASSWORD: ${{ secrets.E2E_NEW_PASSWORD }}
      E2E_KUBERNETES_CONTEXT: 'kind-kind'
      E2E_KUBERNETES_KUBECONFIG_PATH: /home/runner/.kube/config
      E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY: ${{ secrets.E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY }}
      E2E_GCP_PROJECT_ID: ${{ secrets.E2E_GCP_PROJECT_ID }}
      E2E_GITHUB_APP_ID: ${{ secrets.E2E_GITHUB_APP_ID }}
      E2E_GITHUB_BASE64_APP_PRIVATE_KEY: ${{ secrets.E2E_GITHUB_BASE64_APP_PRIVATE_KEY }}
      E2E_GITHUB_USERNAME: ${{ secrets.E2E_GITHUB_USERNAME }}
      E2E_GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_PERSONAL_ACCESS_TOKEN }}
      E2E_GITHUB_ORGANIZATION: ${{ secrets.E2E_GITHUB_ORGANIZATION }}
      E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN }}
      E2E_ORGANIZATION_ID: ${{ secrets.E2E_ORGANIZATION_ID }}
      E2E_OCI_TENANCY_ID: ${{ secrets.E2E_OCI_TENANCY_ID }}
      E2E_OCI_USER_ID: ${{ secrets.E2E_OCI_USER_ID }}
      E2E_OCI_FINGERPRINT: ${{ secrets.E2E_OCI_FINGERPRINT }}
      E2E_OCI_KEY_CONTENT: ${{ secrets.E2E_OCI_KEY_CONTENT }}
      E2E_OCI_REGION: ${{ secrets.E2E_OCI_REGION }}
      E2E_NEW_USER_PASSWORD: ${{ secrets.E2E_NEW_USER_PASSWORD }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
      - name: Create k8s Kind Cluster
        uses: helm/kind-action@v1
        with:
          cluster_name: kind
      - name: Modify kubeconfig
        run: |
          # Point the kubeconfig at https://kind-control-plane:6443 so the
          # worker service in docker-compose.yml can reach the kind cluster
          kubectl config set-cluster kind-kind --server=https://kind-control-plane:6443
          kubectl config view
      - name: Add network kind to docker compose
        run: |
          # Add the kind network to docker compose to interconnect with the kind cluster
          yq -i '.networks.kind.external = true' docker-compose.yml
          # Attach the worker service to the kind network as well as the default one
          yq -i '.services.worker.networks = ["kind","default"]' docker-compose.yml
      - name: Fix API data directory permissions
        run: docker run --rm -v $(pwd)/_data/api:/data alpine chown -R 1000:1000 /data
      - name: Add AWS credentials for testing AWS SDK Default Adding Provider
        run: |
          echo "Adding AWS credentials for testing AWS SDK Default Adding Provider..."
          echo "AWS_ACCESS_KEY_ID=${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}" >> .env
          echo "AWS_SECRET_ACCESS_KEY=${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}" >> .env
      - name: Start API services
        run: |
          # Override docker-compose image tag to use latest instead of stable
@@ -78,29 +117,42 @@ jobs:
        uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: '20.x'
          cache: 'npm'
          cache-dependency-path: './ui/package-lock.json'
      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 10
          run_install: false
      - name: Get pnpm store directory
        shell: bash
        run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
      - name: Setup pnpm cache
        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
        with:
          path: ${{ env.STORE_PATH }}
          key: ${{ runner.os }}-pnpm-store-${{ hashFiles('ui/pnpm-lock.yaml') }}
          restore-keys: |
            ${{ runner.os }}-pnpm-store-
      - name: Install UI dependencies
        working-directory: ./ui
        run: npm ci
        run: pnpm install --frozen-lockfile
      - name: Build UI application
        working-directory: ./ui
        run: npm run build
        run: pnpm run build
      - name: Cache Playwright browsers
        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
        id: playwright-cache
        with:
          path: ~/.cache/ms-playwright
          key: ${{ runner.os }}-playwright-${{ hashFiles('ui/package-lock.json') }}
          key: ${{ runner.os }}-playwright-${{ hashFiles('ui/pnpm-lock.yaml') }}
          restore-keys: |
            ${{ runner.os }}-playwright-
      - name: Install Playwright browsers
        working-directory: ./ui
        if: steps.playwright-cache.outputs.cache-hit != 'true'
        run: npm run test:e2e:install
        run: pnpm run test:e2e:install
      - name: Run E2E tests
        working-directory: ./ui
        run: npm run test:e2e
        run: pnpm run test:e2e
      - name: Upload test reports
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        if: failure()
@@ -48,17 +48,36 @@ jobs:
        uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'
          cache-dependency-path: './ui/package-lock.json'

      - name: Setup pnpm
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: pnpm/action-setup@v4
        with:
          version: 10
          run_install: false

      - name: Get pnpm store directory
        if: steps.check-changes.outputs.any_changed == 'true'
        shell: bash
        run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV

      - name: Setup pnpm cache
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
        with:
          path: ${{ env.STORE_PATH }}
          key: ${{ runner.os }}-pnpm-store-${{ hashFiles('ui/pnpm-lock.yaml') }}
          restore-keys: |
            ${{ runner.os }}-pnpm-store-

      - name: Install dependencies
        if: steps.check-changes.outputs.any_changed == 'true'
        run: npm ci
        run: pnpm install --frozen-lockfile

      - name: Run healthcheck
        if: steps.check-changes.outputs.any_changed == 'true'
        run: npm run healthcheck
        run: pnpm run healthcheck

      - name: Build application
        if: steps.check-changes.outputs.any_changed == 'true'
        run: npm run build
        run: pnpm run build
@@ -150,9 +150,5 @@ _data/
# Claude
CLAUDE.md

# MCP Server
mcp_server/prowler_mcp_server/prowler_app/server.py
mcp_server/prowler_mcp_server/prowler_app/utils/schema.yaml

# Compliance report
*.pdf
@@ -82,7 +82,6 @@ repos:
        args: ["--directory=./"]
        pass_filenames: false


  - repo: https://github.com/hadolint/hadolint
    rev: v2.13.0-beta
    hooks:
@@ -129,9 +128,20 @@ repos:

      - id: ui-checks
        name: UI - Husky Pre-commit
        description: "Run UI pre-commit checks (Claude Code validation + healthcheck)"
        description: "Run UI pre-commit checks (healthcheck + build)"
        entry: bash -c 'cd ui && .husky/pre-commit'
        language: system
        files: '^ui/.*\.(ts|tsx|js|jsx|json|css)$'
        pass_filenames: false
        verbose: true

      - id: gga
        name: Gentleman Guardian Angel (AI Code Review)
        description: "AI-powered code review - runs last after all formatters/linters"
        entry: ./scripts/gga-review.sh
        language: system
        files: '\.(ts|tsx|js|jsx|py)$'
        exclude: '(\.test\.|\.spec\.|_test\.py|test_.*\.py|conftest\.py|\.d\.ts)'
        pass_filenames: false
        stages: ["pre-commit"]
        verbose: true
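The `files`/`exclude` pair on the gga hook decides which staged paths reach the AI reviewer: source in the five listed extensions, minus tests, fixtures, and type definitions. A small self-contained Python sketch of that selection logic (the sample paths are hypothetical):

```python
import re

# Same patterns as the gga hook above.
FILES = re.compile(r"\.(ts|tsx|js|jsx|py)$")
EXCLUDE = re.compile(r"(\.test\.|\.spec\.|_test\.py|test_.*\.py|conftest\.py|\.d\.ts)")

def reviewed(path: str) -> bool:
    """Return True if gga would review this staged file."""
    return bool(FILES.search(path)) and not EXCLUDE.search(path)

assert reviewed("ui/components/charts/severity.tsx")      # hypothetical source file
assert not reviewed("prowler/providers/aws/test_ec2.py")   # matches test_*.py
assert not reviewed("ui/types/api.d.ts")                   # type definitions excluded
```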
@@ -0,0 +1,89 @@
# Code Review Rules

## References

- UI details: `ui/AGENTS.md`
- SDK details: `prowler/AGENTS.md`
- MCP details: `mcp_server/AGENTS.md`

---

## ALL FILES

REJECT if:

- Hardcoded secrets/credentials
- `any` type (TypeScript) or missing type hints (Python)
- Code duplication (violates DRY)
- Silent error handling (no logging); see the sketch after this list
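For that last rule, a short illustration of what the reviewer flags versus accepts (the function and logger names are hypothetical):

```python
import logging

logger = logging.getLogger(__name__)

def sync_findings() -> None:
    """Hypothetical operation that may fail."""
    raise TimeoutError("upstream API timed out")

# REJECTED: the error is swallowed with no logging.
try:
    sync_findings()
except Exception:
    pass

# ACCEPTED: specific exception, logged with context.
try:
    sync_findings()
except TimeoutError as err:
    logger.error("Findings sync failed: %s", err)
```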
---

## TypeScript/React (ui/)

REJECT if:

- `import React` or `import * as React` → use `import { useState }`
- Union types `type X = "a" | "b"` → use `const X = {...} as const`
- `var()` or hex colors in className → use Tailwind classes
- `useMemo` or `useCallback` without justification (React 19 Compiler)
- `z.string().email()` → use `z.email()` (Zod v4)
- `z.string().nonempty()` → use `z.string().min(1)` (Zod v4)
- Missing `"use client"` in client components
- Missing `"use server"` in server actions
- Images without `alt` attribute
- Interactive elements without `aria` labels
- Non-semantic HTML when semantic exists

PREFER:

- `components/shadcn/` over custom components
- `cn()` for conditional/merged classes
- Local files if used 1 place, `components/shared/` if 2+
- Responsive classes: `sm:`, `md:`, `lg:`, `xl:`

EXCEPTION:

- `var()` allowed in chart/graph component props (not className)

---

## Python (prowler/, mcp_server/)

REJECT if:

- Missing type hints on public functions
- Missing docstrings on classes/public methods
- Bare `except:` without specific exception
- `print()` instead of `logger`

REQUIRE for SDK checks (minimal sketch after this list):

- Inherit from `Check`
- `execute()` returns `list[CheckReport]`
- `report.status` = `"PASS"` | `"FAIL"`
- `.metadata.json` file exists
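A minimal sketch of a check shaped by those four rules; the module path, report class name, service client, and rule itself are illustrative, not the exact SDK layout (real checks live under `prowler/providers/<provider>/services/...`):

```python
# ec2_example_ssh_open.py - hedged sketch; pairs with ec2_example_ssh_open.metadata.json
from prowler.lib.check.models import Check, CheckReport  # illustrative import path
from prowler.providers.aws.services.ec2.ec2_client import ec2_client  # illustrative client

class ec2_example_ssh_open(Check):
    """Flag security groups exposing SSH to the world (hypothetical rule)."""

    def execute(self) -> list[CheckReport]:
        findings: list[CheckReport] = []
        for sg in ec2_client.security_groups:
            report = CheckReport(metadata=self.metadata(), resource=sg)
            # Status must be exactly "PASS" or "FAIL".
            report.status = "FAIL" if sg.allows("0.0.0.0/0", port=22) else "PASS"
            report.status_extended = f"Security group {sg.id} SSH exposure evaluated."
            findings.append(report)
        return findings
```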
REQUIRE for MCP tools:

- Extend `BaseTool` (auto-registration)
- `MinimalSerializerMixin` for responses
- `from_api_response()` for API transforms

---

## Response Format

FIRST LINE must be exactly:

```
STATUS: PASSED
```

or

```
STATUS: FAILED
```

If FAILED, list: `file:line - rule - issue`
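For instance, a failing review might look like this (file paths, rule names, and issues are hypothetical):

```
STATUS: FAILED
ui/components/severity-badge.tsx:14 - no-react-import - uses `import * as React`
prowler/providers/aws/services/ec2/ec2_example/ec2_example.py:9 - type-hints - public function missing return type
```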
@@ -12,6 +12,7 @@ ENV TRIVY_VERSION=${TRIVY_VERSION}
# hadolint ignore=DL3008
RUN apt-get update && apt-get install -y --no-install-recommends \
    wget libicu72 libunwind8 libssl3 libcurl4 ca-certificates apt-transport-https gnupg \
    build-essential pkg-config libzstd-dev zlib1g-dev \
    && rm -rf /var/lib/apt/lists/*

# Install PowerShell
@@ -6,7 +6,7 @@
<b><i>Prowler</b> is the Open Cloud Security platform trusted by thousands to automate security and compliance in any cloud environment. With hundreds of ready-to-use checks and compliance frameworks, Prowler delivers real-time, customizable monitoring and seamless integrations, making cloud security simple, scalable, and cost-effective for organizations of any size.
</p>
<p align="center">
<b>Learn more at <a href="https://prowler.com">prowler.com</i></b>
<b>Secure ANY cloud at AI Speed at <a href="https://prowler.com">prowler.com</i></b>
</p>

<p align="center">
@@ -23,6 +23,7 @@
<a href="https://hub.docker.com/r/toniblyx/prowler"><img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/toniblyx/prowler"></a>
<a href="https://gallery.ecr.aws/prowler-cloud/prowler"><img width="120" height="19" alt="AWS ECR Gallery" src="https://user-images.githubusercontent.com/3985464/151531396-b6535a68-c907-44eb-95a1-a09508178616.png"></a>
<a href="https://codecov.io/gh/prowler-cloud/prowler"><img src="https://codecov.io/gh/prowler-cloud/prowler/graph/badge.svg?token=OflBGsdpDl"/></a>
<a href="https://insights.linuxfoundation.org/project/prowler-cloud-prowler"><img src="https://insights.linuxfoundation.org/api/badge/health-score?project=prowler-cloud-prowler"/></a>
</p>
<p align="center">
<a href="https://github.com/prowler-cloud/prowler/releases"><img alt="Version" src="https://img.shields.io/github/v/release/prowler-cloud/prowler"></a>
@@ -35,28 +36,32 @@
</p>
<hr>
<p align="center">
<img align="center" src="/docs/img/prowler-cli-quick.gif" width="100%" height="100%">
<img align="center" src="/docs/img/prowler-cloud.gif" width="100%" height="100%">
</p>

# Description

**Prowler** is an open-source security tool designed to assess and enforce security best practices across AWS, Azure, Google Cloud, and Kubernetes. It supports tasks such as security audits, incident response, continuous monitoring, system hardening, forensic readiness, and remediation processes.
**Prowler** is the world’s most widely used _open-source cloud security platform_ that automates security and compliance across **any cloud environment**. With hundreds of ready-to-use security checks, remediation guidance, and compliance frameworks, Prowler is built to _“Secure ANY cloud at AI Speed”_. Prowler delivers **AI-driven**, **customizable**, and **easy-to-use** assessments, dashboards, reports, and integrations, making cloud security **simple**, **scalable**, and **cost-effective** for organizations of any size.

Prowler includes hundreds of built-in controls to ensure compliance with standards and frameworks, including:

- **Industry Standards:** CIS, NIST 800, NIST CSF, and CISA
- **Regulatory Compliance and Governance:** RBI, FedRAMP, and PCI-DSS
- **Prowler ThreatScore:** Weighted risk prioritization scoring that helps you focus on the most critical security findings first
- **Industry Standards:** CIS, NIST 800, NIST CSF, CISA, and MITRE ATT&CK
- **Regulatory Compliance and Governance:** RBI, FedRAMP, PCI-DSS, and NIS2
- **Frameworks for Sensitive Data and Privacy:** GDPR, HIPAA, and FFIEC
- **Frameworks for Organizational Governance and Quality Control:** SOC2 and GXP
- **AWS-Specific Frameworks:** AWS Foundational Technical Review (FTR) and AWS Well-Architected Framework (Security Pillar)
- **National Security Standards:** ENS (Spanish National Security Scheme)
- **Frameworks for Organizational Governance and Quality Control:** SOC2, GXP, and ISO 27001
- **Cloud-Specific Frameworks:** AWS Foundational Technical Review (FTR), AWS Well-Architected Framework, and BSI C5
- **National Security Standards:** ENS (Spanish National Security Scheme) and KISA ISMS-P (Korean)
- **Custom Security Frameworks:** Tailored to your needs

## Prowler App
## Prowler App / Prowler Cloud

Prowler App is a web-based application that simplifies running Prowler across your cloud provider accounts. It provides a user-friendly interface to visualize the results and streamline your security assessments.
Prowler App / [Prowler Cloud](https://cloud.prowler.com/) is a web-based application that simplifies running Prowler across your cloud provider accounts. It provides a user-friendly interface to visualize the results and streamline your security assessments.





>For more details, refer to the [Prowler App Documentation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#prowler-app-installation)

@@ -82,15 +87,16 @@ prowler dashboard

| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) | Support | Interface |
|---|---|---|---|---|---|---|
| AWS | 576 | 82 | 39 | 10 | Official | UI, API, CLI |
| GCP | 79 | 13 | 13 | 3 | Official | UI, API, CLI |
| Azure | 162 | 19 | 13 | 4 | Official | UI, API, CLI |
| Kubernetes | 83 | 7 | 5 | 7 | Official | UI, API, CLI |
| GitHub | 17 | 2 | 1 | 0 | Official | Stable | UI, API, CLI |
| AWS | 584 | 85 | 40 | 17 | Official | UI, API, CLI |
| GCP | 89 | 17 | 14 | 5 | Official | UI, API, CLI |
| Azure | 169 | 22 | 15 | 8 | Official | UI, API, CLI |
| Kubernetes | 84 | 7 | 6 | 9 | Official | UI, API, CLI |
| GitHub | 20 | 2 | 1 | 2 | Official | UI, API, CLI |
| M365 | 70 | 7 | 3 | 2 | Official | UI, API, CLI |
| OCI | 51 | 13 | 1 | 10 | Official | UI, API, CLI |
| OCI | 52 | 15 | 1 | 12 | Official | UI, API, CLI |
| Alibaba Cloud | 63 | 10 | 1 | 9 | Official | CLI |
| IaC | [See `trivy` docs.](https://trivy.dev/latest/docs/coverage/iac/) | N/A | N/A | N/A | Official | UI, API, CLI |
| MongoDB Atlas | 10 | 3 | 0 | 0 | Official | CLI, API |
| MongoDB Atlas | 10 | 4 | 0 | 3 | Official | UI, API, CLI |
| LLM | [See `promptfoo` docs.](https://www.promptfoo.dev/docs/red-team/plugins/) | N/A | N/A | N/A | Official | CLI |
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |

@@ -153,7 +159,7 @@ You can find more information in the [Troubleshooting](./docs/troubleshooting.md

* `git` installed.
* `poetry` v2 installed: [poetry installation](https://python-poetry.org/docs/#installation).
* `npm` installed: [npm installation](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm).
* `pnpm` installed: [pnpm installation](https://pnpm.io/installation).
* `Docker Compose` installed: https://docs.docker.com/compose/install/.

**Commands to run the API**

@@ -209,9 +215,9 @@ python -m celery -A config.celery beat -l info --scheduler django_celery_beat.sc
``` console
git clone https://github.com/prowler-cloud/prowler
cd prowler/ui
npm install
npm run build
npm start
pnpm install
pnpm run build
pnpm start
```

> Once configured, access the Prowler App at http://localhost:3000. Sign up using your email and password to get started.
@@ -2,6 +2,59 @@

All notable changes to the **Prowler API** are documented in this file.

## [1.17.0] (Prowler UNRELEASED)

### Added
- New endpoint to retrieve an overview of the categories based on finding severities [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)
- Endpoints `GET /findings` and `GET /findings/latests` can now use the category filter [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)

### Changed
- Endpoint `GET /overviews/attack-surfaces` no longer returns the related check IDs [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)
- OpenAI provider to only load chat-compatible models with tool calling support [(#9523)](https://github.com/prowler-cloud/prowler/pull/9523)
- Increased execution delay for the first scheduled scan tasks to 5 seconds [(#9558)](https://github.com/prowler-cloud/prowler/pull/9558)

### Fixed
- Make `scan_id` a required filter in the compliance overview endpoint [(#9560)](https://github.com/prowler-cloud/prowler/pull/9560)

---

## [1.16.1] (Prowler v5.15.1)

### Fixed
- Race condition in scheduled scan creation by adding countdown to task [(#9516)](https://github.com/prowler-cloud/prowler/pull/9516)

## [1.16.0] (Prowler v5.15.0)

### Added
- New endpoint to retrieve an overview of the attack surfaces [(#9309)](https://github.com/prowler-cloud/prowler/pull/9309)
- New endpoint `GET /api/v1/overviews/findings_severity/timeseries` to retrieve daily aggregated findings by severity level [(#9363)](https://github.com/prowler-cloud/prowler/pull/9363)
- Lighthouse AI support for Amazon Bedrock API key [(#9343)](https://github.com/prowler-cloud/prowler/pull/9343)
- Exception handler for provider deletions during scans [(#9414)](https://github.com/prowler-cloud/prowler/pull/9414)
- Support to use admin credentials through the read replica database [(#9440)](https://github.com/prowler-cloud/prowler/pull/9440)

### Changed
- Error messages from Lighthouse celery tasks [(#9165)](https://github.com/prowler-cloud/prowler/pull/9165)
- Restore the compliance overview endpoint's mandatory filters [(#9338)](https://github.com/prowler-cloud/prowler/pull/9338)

---

## [1.15.2] (Prowler v5.14.2)

### Fixed
- Unique constraint violation during compliance overviews task [(#9436)](https://github.com/prowler-cloud/prowler/pull/9436)
- Division by zero error in ENS PDF report when all requirements are manual [(#9443)](https://github.com/prowler-cloud/prowler/pull/9443)

---

## [1.15.1] (Prowler v5.14.1)

### Fixed
- Fix typo in PDF reporting [(#9345)](https://github.com/prowler-cloud/prowler/pull/9345)
- Fix IaC provider initialization failure when mutelist processor is configured [(#9331)](https://github.com/prowler-cloud/prowler/pull/9331)
- Match logic for ThreatScore when counting findings [(#9348)](https://github.com/prowler-cloud/prowler/pull/9348)

---

## [1.15.0] (Prowler v5.14.0)

### Added
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.

[[package]]
name = "about-time"
@@ -2468,6 +2468,72 @@ files = [
    {file = "frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f"},
]

[[package]]
name = "gevent"
version = "25.9.1"
description = "Coroutine-based network library"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "gevent-25.9.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:856b990be5590e44c3a3dc6c8d48a40eaccbb42e99d2b791d11d1e7711a4297e"},
    {file = "gevent-25.9.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:fe1599d0b30e6093eb3213551751b24feeb43db79f07e89d98dd2f3330c9063e"},
    {file = "gevent-25.9.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:f0d8b64057b4bf1529b9ef9bd2259495747fba93d1f836c77bfeaacfec373fd0"},
    {file = "gevent-25.9.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b56cbc820e3136ba52cd690bdf77e47a4c239964d5f80dc657c1068e0fe9521c"},
    {file = "gevent-25.9.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c5fa9ce5122c085983e33e0dc058f81f5264cebe746de5c401654ab96dddfca8"},
    {file = "gevent-25.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:03c74fec58eda4b4edc043311fca8ba4f8744ad1632eb0a41d5ec25413581975"},
    {file = "gevent-25.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a8ae9f895e8651d10b0a8328a61c9c53da11ea51b666388aa99b0ce90f9fdc27"},
    {file = "gevent-25.9.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5aff9e8342dc954adb9c9c524db56c2f3557999463445ba3d9cbe3dada7b7"},
    {file = "gevent-25.9.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1cdf6db28f050ee103441caa8b0448ace545364f775059d5e2de089da975c457"},
    {file = "gevent-25.9.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:812debe235a8295be3b2a63b136c2474241fa5c58af55e6a0f8cfc29d4936235"},
    {file = "gevent-25.9.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b28b61ff9216a3d73fe8f35669eefcafa957f143ac534faf77e8a19eb9e6883a"},
    {file = "gevent-25.9.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5e4b6278b37373306fc6b1e5f0f1cf56339a1377f67c35972775143d8d7776ff"},
    {file = "gevent-25.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d99f0cb2ce43c2e8305bf75bee61a8bde06619d21b9d0316ea190fc7a0620a56"},
    {file = "gevent-25.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:72152517ecf548e2f838c61b4be76637d99279dbaa7e01b3924df040aa996586"},
    {file = "gevent-25.9.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:46b188248c84ffdec18a686fcac5dbb32365d76912e14fda350db5dc0bfd4f86"},
    {file = "gevent-25.9.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f2b54ea3ca6f0c763281cd3f96010ac7e98c2e267feb1221b5a26e2ca0b9a692"},
    {file = "gevent-25.9.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:7a834804ac00ed8a92a69d3826342c677be651b1c3cd66cc35df8bc711057aa2"},
    {file = "gevent-25.9.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:323a27192ec4da6b22a9e51c3d9d896ff20bc53fdc9e45e56eaab76d1c39dd74"},
    {file = "gevent-25.9.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6ea78b39a2c51d47ff0f130f4c755a9a4bbb2dd9721149420ad4712743911a51"},
    {file = "gevent-25.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:dc45cd3e1cc07514a419960af932a62eb8515552ed004e56755e4bf20bad30c5"},
    {file = "gevent-25.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:34e01e50c71eaf67e92c186ee0196a039d6e4f4b35670396baed4a2d8f1b347f"},
    {file = "gevent-25.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acd6bcd5feabf22c7c5174bd3b9535ee9f088d2bbce789f740ad8d6554b18f3"},
    {file = "gevent-25.9.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:4f84591d13845ee31c13f44bdf6bd6c3dbf385b5af98b2f25ec328213775f2ed"},
    {file = "gevent-25.9.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9cdbb24c276a2d0110ad5c978e49daf620b153719ac8a548ce1250a7eb1b9245"},
    {file = "gevent-25.9.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:88b6c07169468af631dcf0fdd3658f9246d6822cc51461d43f7c44f28b0abb82"},
    {file = "gevent-25.9.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b7bb0e29a7b3e6ca9bed2394aa820244069982c36dc30b70eb1004dd67851a48"},
    {file = "gevent-25.9.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2951bb070c0ee37b632ac9134e4fdaad70d2e660c931bb792983a0837fe5b7d7"},
    {file = "gevent-25.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e4e17c2d57e9a42e25f2a73d297b22b60b2470a74be5a515b36c984e1a246d47"},
    {file = "gevent-25.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d94936f8f8b23d9de2251798fcb603b84f083fdf0d7f427183c1828fb64f117"},
    {file = "gevent-25.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:eb51c5f9537b07da673258b4832f6635014fee31690c3f0944d34741b69f92fa"},
    {file = "gevent-25.9.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:1a3fe4ea1c312dbf6b375b416925036fe79a40054e6bf6248ee46526ea628be1"},
    {file = "gevent-25.9.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0adb937f13e5fb90cca2edf66d8d7e99d62a299687400ce2edee3f3504009356"},
    {file = "gevent-25.9.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:427f869a2050a4202d93cf7fd6ab5cffb06d3e9113c10c967b6e2a0d45237cb8"},
    {file = "gevent-25.9.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c049880175e8c93124188f9d926af0a62826a3b81aa6d3074928345f8238279e"},
    {file = "gevent-25.9.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b5a67a0974ad9f24721034d1e008856111e0535f1541499f72a733a73d658d1c"},
    {file = "gevent-25.9.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1d0f5d8d73f97e24ea8d24d8be0f51e0cf7c54b8021c1fddb580bf239474690f"},
    {file = "gevent-25.9.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ddd3ff26e5c4240d3fbf5516c2d9d5f2a998ef87cfb73e1429cfaeaaec860fa6"},
    {file = "gevent-25.9.1-cp314-cp314-win_amd64.whl", hash = "sha256:bb63c0d6cb9950cc94036a4995b9cc4667b8915366613449236970f4394f94d7"},
    {file = "gevent-25.9.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f18f80aef6b1f6907219affe15b36677904f7cfeed1f6a6bc198616e507ae2d7"},
    {file = "gevent-25.9.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b274a53e818124a281540ebb4e7a2c524778f745b7a99b01bdecf0ca3ac0ddb0"},
    {file = "gevent-25.9.1-cp39-cp39-win32.whl", hash = "sha256:c6c91f7e33c7f01237755884316110ee7ea076f5bdb9aa0982b6dc63243c0a38"},
    {file = "gevent-25.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:012a44b0121f3d7c800740ff80351c897e85e76a7e4764690f35c5ad9ec17de5"},
    {file = "gevent-25.9.1.tar.gz", hash = "sha256:adf9cd552de44a4e6754c51ff2e78d9193b7fa6eab123db9578a210e657235dd"},
]

[package.dependencies]
cffi = {version = ">=1.17.1", markers = "platform_python_implementation == \"CPython\" and sys_platform == \"win32\""}
greenlet = {version = ">=3.2.2", markers = "platform_python_implementation == \"CPython\""}
"zope.event" = "*"
"zope.interface" = "*"

[package.extras]
dnspython = ["dnspython (>=1.16.0,<2.0) ; python_version < \"3.10\"", "idna ; python_version < \"3.10\""]
docs = ["furo", "repoze.sphinx.autointerface", "sphinx", "sphinxcontrib-programoutput", "zope.schema"]
monitor = ["psutil (>=5.7.0) ; sys_platform != \"win32\" or platform_python_implementation == \"CPython\""]
recommended = ["cffi (>=1.17.1) ; platform_python_implementation == \"CPython\"", "dnspython (>=1.16.0,<2.0) ; python_version < \"3.10\"", "idna ; python_version < \"3.10\"", "psutil (>=5.7.0) ; sys_platform != \"win32\" or platform_python_implementation == \"CPython\""]
test = ["cffi (>=1.17.1) ; platform_python_implementation == \"CPython\"", "coverage (>=5.0) ; sys_platform != \"win32\"", "dnspython (>=1.16.0,<2.0) ; python_version < \"3.10\"", "idna ; python_version < \"3.10\"", "objgraph", "psutil (>=5.7.0) ; sys_platform != \"win32\" or platform_python_implementation == \"CPython\"", "requests"]

[[package]]
name = "google-api-core"
version = "2.25.1"
@@ -2601,6 +2667,87 @@ files = [
dev = ["pytest"]
docs = ["sphinx", "sphinx-autobuild"]

[[package]]
name = "greenlet"
version = "3.2.4"
description = "Lightweight in-process concurrent programming"
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "platform_python_implementation == \"CPython\""
files = [
    {file = "greenlet-3.2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8c68325b0d0acf8d91dde4e6f930967dd52a5302cd4062932a6b2e7c2969f47c"},
    {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:94385f101946790ae13da500603491f04a76b6e4c059dab271b3ce2e283b2590"},
    {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f10fd42b5ee276335863712fa3da6608e93f70629c631bf77145021600abc23c"},
    {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c8c9e331e58180d0d83c5b7999255721b725913ff6bc6cf39fa2a45841a4fd4b"},
    {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:58b97143c9cc7b86fc458f215bd0932f1757ce649e05b640fea2e79b54cedb31"},
    {file = "greenlet-3.2.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2ca18a03a8cfb5b25bc1cbe20f3d9a4c80d8c3b13ba3df49ac3961af0b1018d"},
    {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fe0a28a7b952a21e2c062cd5756d34354117796c6d9215a87f55e38d15402c5"},
    {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8854167e06950ca75b898b104b63cc646573aa5fef1353d4508ecdd1ee76254f"},
    {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f47617f698838ba98f4ff4189aef02e7343952df3a615f847bb575c3feb177a7"},
    {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af41be48a4f60429d5cad9d22175217805098a9ef7c40bfef44f7669fb9d74d8"},
    {file = "greenlet-3.2.4-cp310-cp310-win_amd64.whl", hash = "sha256:73f49b5368b5359d04e18d15828eecc1806033db5233397748f4ca813ff1056c"},
    {file = "greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2"},
    {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246"},
    {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:94abf90142c2a18151632371140b3dba4dee031633fe614cb592dbb6c9e17bc3"},
    {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:4d1378601b85e2e5171b99be8d2dc85f594c79967599328f95c1dc1a40f1c633"},
    {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0db5594dce18db94f7d1650d7489909b57afde4c580806b8d9203b6e79cdc079"},
    {file = "greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8"},
    {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52"},
    {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa"},
    {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9c6de1940a7d828635fbd254d69db79e54619f165ee7ce32fda763a9cb6a58c"},
    {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03c5136e7be905045160b1b9fdca93dd6727b180feeafda6818e6496434ed8c5"},
    {file = "greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9"},
    {file = "greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd"},
    {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb"},
    {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968"},
    {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9"},
    {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6"},
    {file = "greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0"},
    {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0"},
    {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f"},
    {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0"},
    {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d"},
    {file = "greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02"},
    {file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"},
    {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"},
    {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc"},
    {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a"},
    {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504"},
    {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"},
    {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"},
    {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"},
    {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b"},
    {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929"},
    {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"},
    {file = "greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0"},
    {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f"},
    {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5"},
    {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1"},
    {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735"},
    {file = "greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337"},
    {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269"},
    {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681"},
    {file = "greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01"},
    {file = "greenlet-3.2.4-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:b6a7c19cf0d2742d0809a4c05975db036fdff50cd294a93632d6a310bf9ac02c"},
    {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:27890167f55d2387576d1f41d9487ef171849ea0359ce1510ca6e06c8bece11d"},
    {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:18d9260df2b5fbf41ae5139e1be4e796d99655f023a636cd0e11e6406cca7d58"},
    {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:671df96c1f23c4a0d4077a325483c1503c96a1b7d9db26592ae770daa41233d4"},
    {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:16458c245a38991aa19676900d48bd1a6f2ce3e16595051a4db9d012154e8433"},
    {file = "greenlet-3.2.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9913f1a30e4526f432991f89ae263459b1c64d1608c0d22a5c79c287b3c70df"},
    {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b90654e092f928f110e0007f572007c9727b5265f7632c2fa7415b4689351594"},
    {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81701fd84f26330f0d5f4944d4e92e61afe6319dcd9775e39396e39d7c3e5f98"},
    {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:28a3c6b7cd72a96f61b0e4b2a36f681025b60ae4779cc73c1535eb5f29560b10"},
    {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:52206cd642670b0b320a1fd1cbfd95bca0e043179c1d8a045f2c6109dfe973be"},
    {file = "greenlet-3.2.4-cp39-cp39-win32.whl", hash = "sha256:65458b409c1ed459ea899e939f0e1cdb14f58dbc803f2f93c5eab5694d32671b"},
    {file = "greenlet-3.2.4-cp39-cp39-win_amd64.whl", hash = "sha256:d2e685ade4dafd447ede19c31277a224a239a0a1a4eca4e6390efedf20260cfb"},
    {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"},
]

[package.extras]
docs = ["Sphinx", "furo"]
test = ["objgraph", "psutil", "setuptools"]

[[package]]
name = "gunicorn"
version = "23.0.0"
@@ -6857,7 +7004,69 @@ enabler = ["pytest-enabler (>=2.2)"]
test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]

[[package]]
name = "zope-event"
version = "6.1"
description = "Very basic event publishing system"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "zope_event-6.1-py3-none-any.whl", hash = "sha256:0ca78b6391b694272b23ec1335c0294cc471065ed10f7f606858fc54566c25a0"},
    {file = "zope_event-6.1.tar.gz", hash = "sha256:6052a3e0cb8565d3d4ef1a3a7809336ac519bc4fe38398cb8d466db09adef4f0"},
]

[package.extras]
docs = ["Sphinx"]
test = ["zope.testrunner (>=6.4)"]

[[package]]
name = "zope-interface"
version = "8.1.1"
description = "Interfaces for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "zope_interface-8.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c6b12b656c7d7e3d79cad8e2afc4a37eae6b6076e2c209a33345143148e435e"},
    {file = "zope_interface-8.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:557c0f1363c300db406e9eeaae8ab6d1ba429d4fed60d8ab7dadab5ca66ccd35"},
    {file = "zope_interface-8.1.1-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:127b0e4c873752b777721543cf8525b3db5e76b88bd33bab807f03c568e9003f"},
    {file = "zope_interface-8.1.1-cp310-cp310-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e0892c9d2dd47b45f62d1861bcae8b427fcc49b4a04fff67f12c5c55e56654d7"},
    {file = "zope_interface-8.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ff8a92dc8c8a2c605074e464984e25b9b5a8ac9b2a0238dd73a0f374df59a77e"},
    {file = "zope_interface-8.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:54627ddf6034aab1f506ba750dd093f67d353be6249467d720e9f278a578efe5"},
    {file = "zope_interface-8.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e8a0fdd5048c1bb733e4693eae9bc4145a19419ea6a1c95299318a93fe9f3d72"},
    {file = "zope_interface-8.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a4cb0ea75a26b606f5bc8524fbce7b7d8628161b6da002c80e6417ce5ec757c0"},
    {file = "zope_interface-8.1.1-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:c267b00b5a49a12743f5e1d3b4beef45479d696dab090f11fe3faded078a5133"},
    {file = "zope_interface-8.1.1-cp311-cp311-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e25d3e2b9299e7ec54b626573673bdf0d740cf628c22aef0a3afef85b438aa54"},
    {file = "zope_interface-8.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:63db1241804417aff95ac229c13376c8c12752b83cc06964d62581b493e6551b"},
    {file = "zope_interface-8.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:9639bf4ed07b5277fb231e54109117c30d608254685e48a7104a34618bcbfc83"},
    {file = "zope_interface-8.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a16715808408db7252b8c1597ed9008bdad7bf378ed48eb9b0595fad4170e49d"},
    {file = "zope_interface-8.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce6b58752acc3352c4aa0b55bbeae2a941d61537e6afdad2467a624219025aae"},
    {file = "zope_interface-8.1.1-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:807778883d07177713136479de7fd566f9056a13aef63b686f0ab4807c6be259"},
    {file = "zope_interface-8.1.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50e5eb3b504a7d63dc25211b9298071d5b10a3eb754d6bf2f8ef06cb49f807ab"},
    {file = "zope_interface-8.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eee6f93b2512ec9466cf30c37548fd3ed7bc4436ab29cd5943d7a0b561f14f0f"},
    {file = "zope_interface-8.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:80edee6116d569883c58ff8efcecac3b737733d646802036dc337aa839a5f06b"},
    {file = "zope_interface-8.1.1-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:84f9be6d959640de9da5d14ac1f6a89148b16da766e88db37ed17e936160b0b1"},
    {file = "zope_interface-8.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:531fba91dcb97538f70cf4642a19d6574269460274e3f6004bba6fe684449c51"},
    {file = "zope_interface-8.1.1-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:fc65f5633d5a9583ee8d88d1f5de6b46cd42c62e47757cfe86be36fb7c8c4c9b"},
    {file = "zope_interface-8.1.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:efef80ddec4d7d99618ef71bc93b88859248075ca2e1ae1c78636654d3d55533"},
    {file = "zope_interface-8.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:49aad83525eca3b4747ef51117d302e891f0042b06f32aa1c7023c62642f962b"},
    {file = "zope_interface-8.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:71cf329a21f98cb2bd9077340a589e316ac8a415cac900575a32544b3dffcb98"},
    {file = "zope_interface-8.1.1-cp314-cp314-macosx_10_9_x86_64.whl", hash = "sha256:da311e9d253991ca327601f47c4644d72359bac6950fbb22f971b24cd7850f8c"},
    {file = "zope_interface-8.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3fb25fca0442c7fb93c4ee40b42e3e033fef2f648730c4b7ae6d43222a3e8946"},
    {file = "zope_interface-8.1.1-cp314-cp314-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:bac588d0742b4e35efb7c7df1dacc0397b51ed37a17d4169a38019a1cebacf0a"},
    {file = "zope_interface-8.1.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3d1f053d2d5e2b393e619bce1e55954885c2e63969159aa521839e719442db49"},
    {file = "zope_interface-8.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:64a1ad7f4cb17d948c6bdc525a1d60c0e567b2526feb4fa38b38f249961306b8"},
    {file = "zope_interface-8.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:169214da1b82b7695d1a36f92d70b11166d66b6b09d03df35d150cc62ac52276"},
    {file = "zope_interface-8.1.1.tar.gz", hash = "sha256:51b10e6e8e238d719636a401f44f1e366146912407b58453936b781a19be19ec"},
]

[package.extras]
docs = ["Sphinx", "furo", "repoze.sphinx.autointerface"]
test = ["coverage[toml]", "zope.event", "zope.testing"]
testing = ["coverage[toml]", "zope.event", "zope.testing"]

[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "943e2cd6b87229704550d4e140b36509fb9f58896ebb5834b9fbabe28a9ee92f"
content-hash = "77ef098291cb8631565a1ab5027ce33e7fcb5a04883dc7160bf373eac9e1fb49"
@@ -35,7 +35,8 @@ dependencies = [
    "markdown (>=3.9,<4.0)",
    "drf-simple-apikey (==2.2.1)",
    "matplotlib (>=3.10.6,<4.0.0)",
    "reportlab (>=4.4.4,<5.0.0)"
    "reportlab (>=4.4.4,<5.0.0)",
    "gevent (>=25.9.1,<26.0.0)"
]
description = "Prowler's API (Django/DRF)"
license = "Apache-2.0"
@@ -43,7 +44,7 @@ name = "prowler-api"
package-mode = false
# Needed for the SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.15.0"
version = "1.16.0"

[project.scripts]
celery = "src.backend.config.settings.celery"
@@ -26,6 +26,7 @@ class MainRouter:
    default_db = "default"
    admin_db = "admin"
    replica_db = "replica"
    admin_replica_db = "admin_replica"

    def db_for_read(self, model, **hints):  # noqa: F841
        model_table_name = model._meta.db_table
@@ -49,7 +50,12 @@ class MainRouter:

    def allow_relation(self, obj1, obj2, **hints):  # noqa: F841
        # Allow relations when both objects originate from allowed connectors
        allowed_dbs = {self.default_db, self.admin_db, self.replica_db}
        allowed_dbs = {
            self.default_db,
            self.admin_db,
            self.replica_db,
            self.admin_replica_db,
        }
        if {obj1._state.db, obj2._state.db} <= allowed_dbs:
            return True
        return None

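A quick illustration of the subset check above (a sketch; the aliases are the ones defined on MainRouter):

# {obj1._state.db, obj2._state.db} <= allowed_dbs is plain set containment:
allowed_dbs = {"default", "admin", "replica", "admin_replica"}
assert {"default", "replica"} <= allowed_dbs  # relation allowed (True)
assert not {"default", "some_other_db"} <= allowed_dbs  # router returns None (no opinion)
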
@@ -1,10 +1,14 @@
import uuid
from functools import wraps

from django.db import connection, transaction
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, connection, transaction
from rest_framework_json_api.serializers import ValidationError

from api.db_utils import POSTGRES_TENANT_VAR, SET_CONFIG_QUERY
from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import POSTGRES_TENANT_VAR, SET_CONFIG_QUERY, rls_transaction
from api.exceptions import ProviderDeletedException
from api.models import Provider, Scan


def set_tenant(func=None, *, keep_tenant=False):
@@ -66,3 +70,49 @@ def set_tenant(func=None, *, keep_tenant=False):
        return decorator
    else:
        return decorator(func)


def handle_provider_deletion(func):
    """
    Decorator that raises ProviderDeletedException if the provider was deleted during execution.

    Catches ObjectDoesNotExist and IntegrityError, checks whether the provider still exists,
    and raises ProviderDeletedException if not. Otherwise, the original exception is re-raised.

    Requires tenant_id and either provider_id or scan_id in kwargs.

    Example:
        @shared_task
        @handle_provider_deletion
        def scan_task(scan_id, tenant_id, provider_id):
            ...
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (ObjectDoesNotExist, IntegrityError):
            tenant_id = kwargs.get("tenant_id")
            provider_id = kwargs.get("provider_id")

            with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
                if provider_id is None:
                    scan_id = kwargs.get("scan_id")
                    if scan_id is None:
                        raise AssertionError(
                            "This task does not have provider or scan in the kwargs"
                        )
                    scan = Scan.objects.filter(pk=scan_id).first()
                    if scan is None:
                        raise ProviderDeletedException(
                            f"Provider for scan '{scan_id}' was deleted during the scan"
                        ) from None
                    provider_id = str(scan.provider_id)
                if not Provider.objects.filter(pk=provider_id).exists():
                    raise ProviderDeletedException(
                        f"Provider '{provider_id}' was deleted during the scan"
                    ) from None
                raise

    return wrapper

@@ -66,6 +66,10 @@ class ProviderConnectionError(Exception):
    """Base exception for provider connection errors."""


class ProviderDeletedException(Exception):
    """Raised when a provider has been deleted during scan/task execution."""


def custom_exception_handler(exc, context):
    if isinstance(exc, django_validation_error):
        if hasattr(exc, "error_dict"):

@@ -23,7 +23,9 @@ from api.db_utils import (
    StatusEnumField,
)
from api.models import (
    AttackSurfaceOverview,
    ComplianceRequirementOverview,
    DailySeveritySummary,
    Finding,
    Integration,
    Invitation,
@@ -41,6 +43,7 @@ from api.models import (
    ResourceTag,
    Role,
    Scan,
    ScanCategorySummary,
    ScanSummary,
    SeverityChoices,
    StateChoices,
@@ -155,6 +158,9 @@ class CommonFindingFilters(FilterSet):
        field_name="resources__type", lookup_expr="icontains"
    )

    category = CharFilter(method="filter_category")
    category__in = CharInFilter(field_name="categories", lookup_expr="overlap")

    # Temporarily disabled until we implement tag filtering in the UI
    # resource_tag_key = CharFilter(field_name="resources__tags__key")
    # resource_tag_key__in = CharInFilter(
@@ -186,6 +192,9 @@ class CommonFindingFilters(FilterSet):
    def filter_resource_type(self, queryset, name, value):
        return queryset.filter(resource_types__contains=[value])

    def filter_category(self, queryset, name, value):
        return queryset.filter(categories__contains=[value])

    def filter_resource_tag(self, queryset, name, value):
        overall_query = Q()
        for key_value_pair in value:

@@ -760,15 +769,7 @@ class RoleFilter(FilterSet):

class ComplianceOverviewFilter(FilterSet):
    inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
    scan_id = UUIDFilter(field_name="scan_id")
    provider_id = UUIDFilter(field_name="scan__provider__id", lookup_expr="exact")
    provider_id__in = UUIDInFilter(field_name="scan__provider__id", lookup_expr="in")
    provider_type = ChoiceFilter(
        field_name="scan__provider__provider", choices=Provider.ProviderChoices.choices
    )
    provider_type__in = ChoiceInFilter(
        field_name="scan__provider__provider", choices=Provider.ProviderChoices.choices
    )
    scan_id = UUIDFilter(field_name="scan_id", required=True)
    region = CharFilter(field_name="region")

    class Meta:

@@ -802,6 +803,68 @@ class ScanSummaryFilter(FilterSet):
        }


class DailySeveritySummaryFilter(FilterSet):
    """Filter for the findings_severity/timeseries endpoint."""

    MAX_DATE_RANGE_DAYS = 365

    provider_id = UUIDFilter(field_name="provider_id", lookup_expr="exact")
    provider_id__in = UUIDInFilter(field_name="provider_id", lookup_expr="in")
    provider_type = ChoiceFilter(
        field_name="provider__provider", choices=Provider.ProviderChoices.choices
    )
    provider_type__in = ChoiceInFilter(
        field_name="provider__provider", choices=Provider.ProviderChoices.choices
    )
    date_from = DateFilter(method="filter_noop")
    date_to = DateFilter(method="filter_noop")

    class Meta:
        model = DailySeveritySummary
        fields = ["provider_id"]

    def filter_noop(self, queryset, name, value):
        return queryset

    def filter_queryset(self, queryset):
        if not self.data.get("date_from"):
            raise ValidationError(
                [
                    {
                        "detail": "This query parameter is required.",
                        "status": "400",
                        "source": {"pointer": "filter[date_from]"},
                        "code": "required",
                    }
                ]
            )

        today = date.today()
        date_from = self.form.cleaned_data.get("date_from")
        date_to = min(self.form.cleaned_data.get("date_to") or today, today)

        if (date_to - date_from).days > self.MAX_DATE_RANGE_DAYS:
            raise ValidationError(
                [
                    {
                        "detail": f"Date range cannot exceed {self.MAX_DATE_RANGE_DAYS} days.",
                        "status": "400",
                        "source": {"pointer": "filter[date_from]"},
                        "code": "invalid",
                    }
                ]
            )

        # View access
        self.request._date_from = date_from
        self.request._date_to = date_to

        # Apply date filter (only lte for fill-forward logic)
        queryset = queryset.filter(date__lte=date_to)

        return super().filter_queryset(queryset)


class ScanSummarySeverityFilter(ScanSummaryFilter):
    """Filter for the findings_severity ScanSummary endpoint - includes status filters."""

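A usage sketch for the validation above (the route prefix is hypothetical; only the findings_severity/timeseries suffix appears in this diff):

# GET .../findings_severity/timeseries?filter[date_from]=2025-01-01&filter[date_to]=2025-03-01
#   -> 200, queryset limited to date <= date_to
# GET .../findings_severity/timeseries (no filter[date_from])
#   -> 400 "This query parameter is required."
# GET .../findings_severity/timeseries?filter[date_from]=2023-01-01&filter[date_to]=2025-01-01
#   -> 400 "Date range cannot exceed 365 days."
# date_to is clamped to today, and only date__lte is applied so the view can
# fill forward the latest summary for days without a scan.
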
@@ -1021,3 +1084,41 @@ class ThreatScoreSnapshotFilter(FilterSet):
        "inserted_at": ["date", "gte", "lte"],
        "overall_score": ["exact", "gte", "lte"],
    }


class AttackSurfaceOverviewFilter(FilterSet):
    """Filter for attack surface overview aggregations by provider."""

    provider_id = UUIDFilter(field_name="scan__provider__id", lookup_expr="exact")
    provider_id__in = UUIDInFilter(field_name="scan__provider__id", lookup_expr="in")
    provider_type = ChoiceFilter(
        field_name="scan__provider__provider", choices=Provider.ProviderChoices.choices
    )
    provider_type__in = ChoiceInFilter(
        field_name="scan__provider__provider",
        choices=Provider.ProviderChoices.choices,
        lookup_expr="in",
    )

    class Meta:
        model = AttackSurfaceOverview
        fields = {}


class CategoryOverviewFilter(FilterSet):
    provider_id = UUIDFilter(field_name="scan__provider__id", lookup_expr="exact")
    provider_id__in = UUIDInFilter(field_name="scan__provider__id", lookup_expr="in")
    provider_type = ChoiceFilter(
        field_name="scan__provider__provider", choices=Provider.ProviderChoices.choices
    )
    provider_type__in = ChoiceInFilter(
        field_name="scan__provider__provider",
        choices=Provider.ProviderChoices.choices,
        lookup_expr="in",
    )
    category = CharFilter(field_name="category", lookup_expr="exact")
    category__in = CharInFilter(field_name="category", lookup_expr="in")

    class Meta:
        model = ScanCategorySummary
        fields = {}

@@ -22,7 +22,7 @@ class Migration(migrations.Migration):
                    ("kubernetes", "Kubernetes"),
                    ("m365", "M365"),
                    ("github", "GitHub"),
                    ("oci", "Oracle Cloud Infrastructure"),
                    ("oraclecloud", "Oracle Cloud Infrastructure"),
                    ("iac", "IaC"),
                ],
                default="aws",

@@ -29,4 +29,8 @@ class Migration(migrations.Migration):
                default="aws",
            ),
        ),
        migrations.RunSQL(
            "ALTER TYPE provider ADD VALUE IF NOT EXISTS 'mongodbatlas';",
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]

@@ -0,0 +1,89 @@
# Generated by Django 5.1.14 on 2025-11-19 13:03

import uuid

import django.db.models.deletion
from django.db import migrations, models

import api.rls


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0059_compliance_overview_summary"),
    ]

    operations = [
        migrations.CreateModel(
            name="AttackSurfaceOverview",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("inserted_at", models.DateTimeField(auto_now_add=True)),
                (
                    "attack_surface_type",
                    models.CharField(
                        choices=[
                            ("internet-exposed", "Internet Exposed"),
                            ("secrets", "Exposed Secrets"),
                            ("privilege-escalation", "Privilege Escalation"),
                            ("ec2-imdsv1", "EC2 IMDSv1 Enabled"),
                        ],
                        max_length=50,
                    ),
                ),
                ("total_findings", models.IntegerField(default=0)),
                ("failed_findings", models.IntegerField(default=0)),
                ("muted_failed_findings", models.IntegerField(default=0)),
            ],
            options={
                "db_table": "attack_surface_overviews",
                "abstract": False,
            },
        ),
        migrations.AddField(
            model_name="attacksurfaceoverview",
            name="scan",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="attack_surface_overviews",
                related_query_name="attack_surface_overview",
                to="api.scan",
            ),
        ),
        migrations.AddField(
            model_name="attacksurfaceoverview",
            name="tenant",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
            ),
        ),
        migrations.AddIndex(
            model_name="attacksurfaceoverview",
            index=models.Index(
                fields=["tenant_id", "scan_id"], name="attack_surf_tenant_scan_idx"
            ),
        ),
        migrations.AddConstraint(
            model_name="attacksurfaceoverview",
            constraint=models.UniqueConstraint(
                fields=("tenant_id", "scan_id", "attack_surface_type"),
                name="unique_attack_surface_per_scan",
            ),
        ),
        migrations.AddConstraint(
            model_name="attacksurfaceoverview",
            constraint=api.rls.RowLevelSecurityConstraint(
                "tenant_id",
                name="rls_on_attacksurfaceoverview",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
    ]

@@ -0,0 +1,96 @@
# Generated by Django 5.1.14 on 2025-12-03 13:38

import uuid

import django.db.models.deletion
from django.db import migrations, models

import api.rls


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0060_attack_surface_overview"),
    ]

    operations = [
        migrations.CreateModel(
            name="DailySeveritySummary",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("date", models.DateField()),
                ("critical", models.IntegerField(default=0)),
                ("high", models.IntegerField(default=0)),
                ("medium", models.IntegerField(default=0)),
                ("low", models.IntegerField(default=0)),
                ("informational", models.IntegerField(default=0)),
                ("muted", models.IntegerField(default=0)),
                (
                    "provider",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="daily_severity_summaries",
                        related_query_name="daily_severity_summary",
                        to="api.provider",
                    ),
                ),
                (
                    "scan",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="daily_severity_summaries",
                        related_query_name="daily_severity_summary",
                        to="api.scan",
                    ),
                ),
                (
                    "tenant",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="api.tenant",
                    ),
                ),
            ],
            options={
                "db_table": "daily_severity_summaries",
                "abstract": False,
            },
        ),
        migrations.AddIndex(
            model_name="dailyseveritysummary",
            index=models.Index(
                fields=["tenant_id", "id"],
                name="dss_tenant_id_idx",
            ),
        ),
        migrations.AddIndex(
            model_name="dailyseveritysummary",
            index=models.Index(
                fields=["tenant_id", "provider_id"],
                name="dss_tenant_provider_idx",
            ),
        ),
        migrations.AddConstraint(
            model_name="dailyseveritysummary",
            constraint=models.UniqueConstraint(
                fields=("tenant_id", "provider", "date"),
                name="unique_daily_severity_summary",
            ),
        ),
        migrations.AddConstraint(
            model_name="dailyseveritysummary",
            constraint=api.rls.RowLevelSecurityConstraint(
                "tenant_id",
                name="rls_on_dailyseveritysummary",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
    ]

@@ -0,0 +1,30 @@
# Generated by Django 5.1.14 on 2025-12-10

from django.db import migrations
from tasks.tasks import backfill_daily_severity_summaries_task

from api.db_router import MainRouter
from api.rls import Tenant


def trigger_backfill_task(apps, schema_editor):
    """
    Trigger the backfill task for all tenants.

    This dispatches backfill_daily_severity_summaries_task for each tenant
    in the system to populate DailySeveritySummary records from historical scans.
    """
    tenant_ids = Tenant.objects.using(MainRouter.admin_db).values_list("id", flat=True)

    for tenant_id in tenant_ids:
        backfill_daily_severity_summaries_task.delay(tenant_id=str(tenant_id), days=90)


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0061_daily_severity_summary"),
    ]

    operations = [
        migrations.RunPython(trigger_backfill_task, migrations.RunPython.noop),
    ]

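A note on the RunPython above: it only enqueues Celery work via delay(), so the actual backfill runs asynchronously after the migration commits, and the reverse operation is a noop. Roughly, per tenant:

# backfill_daily_severity_summaries_task.delay(tenant_id="<tenant uuid>", days=90)
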
@@ -0,0 +1,108 @@
import uuid

import django.db.models.deletion
from django.db import migrations, models

import api.db_utils
import api.rls


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0062_backfill_daily_severity_summaries"),
    ]

    operations = [
        migrations.CreateModel(
            name="ScanCategorySummary",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                (
                    "tenant_id",
                    models.UUIDField(db_index=True, editable=False),
                ),
                (
                    "inserted_at",
                    models.DateTimeField(auto_now_add=True),
                ),
                (
                    "scan",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="category_summaries",
                        related_query_name="category_summary",
                        to="api.scan",
                    ),
                ),
                (
                    "category",
                    models.CharField(max_length=100),
                ),
                (
                    "severity",
                    api.db_utils.SeverityEnumField(
                        choices=[
                            ("critical", "Critical"),
                            ("high", "High"),
                            ("medium", "Medium"),
                            ("low", "Low"),
                            ("informational", "Informational"),
                        ],
                        max_length=50,
                    ),
                ),
                (
                    "total_findings",
                    models.IntegerField(
                        default=0, help_text="Non-muted findings (PASS + FAIL)"
                    ),
                ),
                (
                    "failed_findings",
                    models.IntegerField(
                        default=0,
                        help_text="Non-muted FAIL findings (subset of total_findings)",
                    ),
                ),
                (
                    "new_failed_findings",
                    models.IntegerField(
                        default=0,
                        help_text="Non-muted FAIL with delta='new' (subset of failed_findings)",
                    ),
                ),
            ],
            options={
                "db_table": "scan_category_summaries",
            },
        ),
        migrations.AddIndex(
            model_name="scancategorysummary",
            index=models.Index(
                fields=["tenant_id", "scan"], name="scs_tenant_scan_idx"
            ),
        ),
        migrations.AddConstraint(
            model_name="scancategorysummary",
            constraint=models.UniqueConstraint(
                fields=("tenant_id", "scan_id", "category", "severity"),
                name="unique_category_severity_per_scan",
            ),
        ),
        migrations.AddConstraint(
            model_name="scancategorysummary",
            constraint=api.rls.RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_scancategorysummary",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
    ]

@@ -0,0 +1,21 @@
import django.contrib.postgres.fields
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0063_scan_category_summary"),
    ]

    operations = [
        migrations.AddField(
            model_name="finding",
            name="categories",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(max_length=100),
                blank=True,
                null=True,
                size=None,
            ),
        ),
    ]

@@ -868,6 +868,14 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
        null=True,
    )

    # Check metadata denormalization
    categories = ArrayField(
        models.CharField(max_length=100),
        blank=True,
        null=True,
        help_text="Categories from check metadata for efficient filtering",
    )

    # Relationships
    scan = models.ForeignKey(to=Scan, related_name="findings", on_delete=models.CASCADE)

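The denormalized categories ArrayField is what the new filters above rely on; a minimal sketch of the two Postgres array lookups involved (assuming an RLS tenant context is already set):

# "contains" matches findings whose categories include every listed value:
Finding.objects.filter(categories__contains=["internet-exposed"])
# "overlap" (used by category__in) matches any intersection:
Finding.objects.filter(categories__overlap=["secrets", "privilege-escalation"])
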
@@ -1500,6 +1508,65 @@ class ScanSummary(RowLevelSecurityProtectedModel):
        resource_name = "scan-summaries"


class DailySeveritySummary(RowLevelSecurityProtectedModel):
    """
    Pre-aggregated daily severity counts per provider.
    Used by the findings_severity/timeseries endpoint for efficient queries.
    """

    objects = ActiveProviderManager()

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    date = models.DateField()

    provider = models.ForeignKey(
        Provider,
        on_delete=models.CASCADE,
        related_name="daily_severity_summaries",
        related_query_name="daily_severity_summary",
    )
    scan = models.ForeignKey(
        Scan,
        on_delete=models.CASCADE,
        related_name="daily_severity_summaries",
        related_query_name="daily_severity_summary",
    )

    # Aggregated fail counts by severity
    critical = models.IntegerField(default=0)
    high = models.IntegerField(default=0)
    medium = models.IntegerField(default=0)
    low = models.IntegerField(default=0)
    informational = models.IntegerField(default=0)
    muted = models.IntegerField(default=0)

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "daily_severity_summaries"

        constraints = [
            models.UniqueConstraint(
                fields=("tenant_id", "provider", "date"),
                name="unique_daily_severity_summary",
            ),
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]

        indexes = [
            models.Index(
                fields=["tenant_id", "id"],
                name="dss_tenant_id_idx",
            ),
            models.Index(
                fields=["tenant_id", "provider_id"],
                name="dss_tenant_provider_idx",
            ),
        ]


class Integration(RowLevelSecurityProtectedModel):
    class IntegrationChoices(models.TextChoices):
        AMAZON_S3 = "amazon_s3", _("Amazon S3")

@@ -1892,6 +1959,64 @@ class ResourceScanSummary(RowLevelSecurityProtectedModel):
    ]


class ScanCategorySummary(RowLevelSecurityProtectedModel):
    """
    Pre-aggregated category metrics per scan by severity.

    Stores one row per (category, severity) combination per scan for efficient
    overview queries. Categories come from check_metadata.categories.

    Count relationships (each is a subset of the previous):
    - total_findings >= failed_findings >= new_failed_findings
    """

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)

    scan = models.ForeignKey(
        Scan,
        on_delete=models.CASCADE,
        related_name="category_summaries",
        related_query_name="category_summary",
    )

    category = models.CharField(max_length=100)
    severity = SeverityEnumField(choices=SeverityChoices)

    total_findings = models.IntegerField(
        default=0, help_text="Non-muted findings (PASS + FAIL)"
    )
    failed_findings = models.IntegerField(
        default=0, help_text="Non-muted FAIL findings (subset of total_findings)"
    )
    new_failed_findings = models.IntegerField(
        default=0,
        help_text="Non-muted FAIL with delta='new' (subset of failed_findings)",
    )

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "scan_category_summaries"

        indexes = [
            models.Index(fields=["tenant_id", "scan"], name="scs_tenant_scan_idx"),
        ]

        constraints = [
            models.UniqueConstraint(
                fields=("tenant_id", "scan_id", "category", "severity"),
                name="unique_category_severity_per_scan",
            ),
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]

    class JSONAPIMeta:
        resource_name = "scan-category-summaries"


class LighthouseConfiguration(RowLevelSecurityProtectedModel):
    """
    Stores configuration and API keys for LLM services.

@@ -2405,3 +2530,63 @@ class ThreatScoreSnapshot(RowLevelSecurityProtectedModel):

    class JSONAPIMeta:
        resource_name = "threatscore-snapshots"


class AttackSurfaceOverview(RowLevelSecurityProtectedModel):
    """
    Pre-aggregated attack surface metrics per scan.

    Stores counts for each attack surface type (internet-exposed, secrets,
    privilege-escalation, ec2-imdsv1) to enable fast overview queries.
    """

    class AttackSurfaceTypeChoices(models.TextChoices):
        INTERNET_EXPOSED = "internet-exposed", _("Internet Exposed")
        SECRETS = "secrets", _("Exposed Secrets")
        PRIVILEGE_ESCALATION = "privilege-escalation", _("Privilege Escalation")
        EC2_IMDSV1 = "ec2-imdsv1", _("EC2 IMDSv1 Enabled")

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)

    scan = models.ForeignKey(
        Scan,
        on_delete=models.CASCADE,
        related_name="attack_surface_overviews",
        related_query_name="attack_surface_overview",
    )

    attack_surface_type = models.CharField(
        max_length=50,
        choices=AttackSurfaceTypeChoices.choices,
    )

    # Finding counts
    total_findings = models.IntegerField(default=0)  # All findings (PASS + FAIL)
    failed_findings = models.IntegerField(default=0)  # Non-muted failed findings
    muted_failed_findings = models.IntegerField(default=0)  # Muted failed findings

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "attack_surface_overviews"

        constraints = [
            models.UniqueConstraint(
                fields=("tenant_id", "scan_id", "attack_surface_type"),
                name="unique_attack_surface_per_scan",
            ),
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]

        indexes = [
            models.Index(
                fields=["tenant_id", "scan_id"],
                name="attack_surf_tenant_scan_idx",
            ),
        ]

    class JSONAPIMeta:
        resource_name = "attack-surface-overviews"

@@ -65,11 +65,11 @@ def get_providers(role: Role) -> QuerySet[Provider]:
        A QuerySet of Provider objects filtered by the role's provider groups.
        If the role has no provider groups, returns an empty queryset.
    """
    tenant = role.tenant
    tenant_id = role.tenant_id
    provider_groups = role.provider_groups.all()
    if not provider_groups.exists():
        return Provider.objects.none()

    return Provider.objects.filter(
        tenant=tenant, provider_groups__in=provider_groups
        tenant_id=tenant_id, provider_groups__in=provider_groups
    ).distinct()

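The switch from role.tenant to role.tenant_id is a small but real saving: the FK id is already loaded on the Role row, while role.tenant would lazily fetch the Tenant object just to compare its primary key.

# Equivalent filters; the second avoids the extra Tenant query:
#   Provider.objects.filter(tenant=role.tenant, provider_groups__in=provider_groups)
#   Provider.objects.filter(tenant_id=role.tenant_id, provider_groups__in=provider_groups)
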
@@ -2,9 +2,12 @@ import uuid
from unittest.mock import call, patch

import pytest
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError

from api.db_utils import POSTGRES_TENANT_VAR, SET_CONFIG_QUERY
from api.decorators import set_tenant
from api.decorators import handle_provider_deletion, set_tenant
from api.exceptions import ProviderDeletedException


@pytest.mark.django_db

@@ -34,3 +37,142 @@ class TestSetTenantDecorator:

        with pytest.raises(KeyError):
            random_func("test_arg")


@pytest.mark.django_db
class TestHandleProviderDeletionDecorator:
    def test_success_no_exception(self, tenants_fixture, providers_fixture):
        """Decorated function runs normally when no exception is raised."""
        tenant = tenants_fixture[0]
        provider = providers_fixture[0]

        @handle_provider_deletion
        def task_func(**kwargs):
            return "success"

        result = task_func(
            tenant_id=str(tenant.id),
            provider_id=str(provider.id),
        )
        assert result == "success"

    @patch("api.decorators.rls_transaction")
    @patch("api.decorators.Provider.objects.filter")
    def test_provider_deleted_with_provider_id(
        self, mock_filter, mock_rls, tenants_fixture
    ):
        """Raises ProviderDeletedException when provider_id is provided and the provider was deleted."""
        tenant = tenants_fixture[0]
        deleted_provider_id = str(uuid.uuid4())

        mock_rls.return_value.__enter__ = lambda s: None
        mock_rls.return_value.__exit__ = lambda s, *args: None
        mock_filter.return_value.exists.return_value = False

        @handle_provider_deletion
        def task_func(**kwargs):
            raise ObjectDoesNotExist("Some object not found")

        with pytest.raises(ProviderDeletedException) as exc_info:
            task_func(tenant_id=str(tenant.id), provider_id=deleted_provider_id)

        assert deleted_provider_id in str(exc_info.value)

    @patch("api.decorators.rls_transaction")
    @patch("api.decorators.Provider.objects.filter")
    @patch("api.decorators.Scan.objects.filter")
    def test_provider_deleted_with_scan_id(
        self, mock_scan_filter, mock_provider_filter, mock_rls, tenants_fixture
    ):
        """Raises ProviderDeletedException when the scan exists but the provider was deleted."""
        tenant = tenants_fixture[0]
        scan_id = str(uuid.uuid4())
        provider_id = str(uuid.uuid4())

        mock_rls.return_value.__enter__ = lambda s: None
        mock_rls.return_value.__exit__ = lambda s, *args: None

        mock_scan = type("MockScan", (), {"provider_id": provider_id})()
        mock_scan_filter.return_value.first.return_value = mock_scan
        mock_provider_filter.return_value.exists.return_value = False

        @handle_provider_deletion
        def task_func(**kwargs):
            raise ObjectDoesNotExist("Some object not found")

        with pytest.raises(ProviderDeletedException) as exc_info:
            task_func(tenant_id=str(tenant.id), scan_id=scan_id)

        assert provider_id in str(exc_info.value)

    @patch("api.decorators.rls_transaction")
    @patch("api.decorators.Scan.objects.filter")
    def test_scan_deleted_cascade(self, mock_scan_filter, mock_rls, tenants_fixture):
        """Raises ProviderDeletedException when the scan was deleted (CASCADE from provider)."""
        tenant = tenants_fixture[0]
        scan_id = str(uuid.uuid4())

        mock_rls.return_value.__enter__ = lambda s: None
        mock_rls.return_value.__exit__ = lambda s, *args: None
        mock_scan_filter.return_value.first.return_value = None

        @handle_provider_deletion
        def task_func(**kwargs):
            raise ObjectDoesNotExist("Some object not found")

        with pytest.raises(ProviderDeletedException) as exc_info:
            task_func(tenant_id=str(tenant.id), scan_id=scan_id)

        assert scan_id in str(exc_info.value)

    @patch("api.decorators.rls_transaction")
    @patch("api.decorators.Provider.objects.filter")
    def test_provider_exists_reraises_original(
        self, mock_filter, mock_rls, tenants_fixture, providers_fixture
    ):
        """Re-raises the original exception when the provider still exists."""
        tenant = tenants_fixture[0]
        provider = providers_fixture[0]

        mock_rls.return_value.__enter__ = lambda s: None
        mock_rls.return_value.__exit__ = lambda s, *args: None
        mock_filter.return_value.exists.return_value = True

        @handle_provider_deletion
        def task_func(**kwargs):
            raise ObjectDoesNotExist("Actual object missing")

        with pytest.raises(ObjectDoesNotExist):
            task_func(tenant_id=str(tenant.id), provider_id=str(provider.id))

    @patch("api.decorators.rls_transaction")
    @patch("api.decorators.Provider.objects.filter")
    def test_integrity_error_provider_deleted(
        self, mock_filter, mock_rls, tenants_fixture
    ):
        """Raises ProviderDeletedException on IntegrityError when the provider was deleted."""
        tenant = tenants_fixture[0]
        deleted_provider_id = str(uuid.uuid4())

        mock_rls.return_value.__enter__ = lambda s: None
        mock_rls.return_value.__exit__ = lambda s, *args: None
        mock_filter.return_value.exists.return_value = False

        @handle_provider_deletion
        def task_func(**kwargs):
            raise IntegrityError("FK constraint violation")

        with pytest.raises(ProviderDeletedException):
            task_func(tenant_id=str(tenant.id), provider_id=deleted_provider_id)

    def test_missing_provider_and_scan_raises_assertion(self, tenants_fixture):
        """Raises AssertionError when neither provider_id nor scan_id is in kwargs."""

        @handle_provider_deletion
        def task_func(**kwargs):
            raise ObjectDoesNotExist("Some object not found")

        with pytest.raises(AssertionError) as exc_info:
            task_func(tenant_id=str(tenants_fixture[0].id))

        assert "provider or scan" in str(exc_info.value)

@@ -21,6 +21,7 @@ from prowler.providers.aws.lib.security_hub.security_hub import SecurityHubConne
from prowler.providers.azure.azure_provider import AzureProvider
from prowler.providers.gcp.gcp_provider import GcpProvider
from prowler.providers.github.github_provider import GithubProvider
from prowler.providers.iac.iac_provider import IacProvider
from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
from prowler.providers.m365.m365_provider import M365Provider
from prowler.providers.mongodbatlas.mongodbatlas_provider import MongodbatlasProvider
@@ -114,6 +115,7 @@ class TestReturnProwlerProvider:
            (Provider.ProviderChoices.GITHUB.value, GithubProvider),
            (Provider.ProviderChoices.MONGODBATLAS.value, MongodbatlasProvider),
            (Provider.ProviderChoices.ORACLECLOUD.value, OraclecloudProvider),
            (Provider.ProviderChoices.IAC.value, IacProvider),
        ],
    )
    def test_return_prowler_provider(self, provider_type, expected_provider):

@@ -254,6 +256,72 @@ class TestGetProwlerProviderKwargs:
        expected_result = {**secret_dict, "mutelist_content": {"key": "value"}}
        assert result == expected_result

    def test_get_prowler_provider_kwargs_iac_provider(self):
        """Test that IaC provider gets correct kwargs with repository URL."""
        provider_uid = "https://github.com/org/repo"
        secret_dict = {"access_token": "test_token"}
        secret_mock = MagicMock()
        secret_mock.secret = secret_dict

        provider = MagicMock()
        provider.provider = Provider.ProviderChoices.IAC.value
        provider.secret = secret_mock
        provider.uid = provider_uid

        result = get_prowler_provider_kwargs(provider)

        expected_result = {
            "scan_repository_url": provider_uid,
            "oauth_app_token": "test_token",
        }
        assert result == expected_result

    def test_get_prowler_provider_kwargs_iac_provider_without_token(self):
        """Test that IaC provider works without access token for public repos."""
        provider_uid = "https://github.com/org/public-repo"
        secret_dict = {}
        secret_mock = MagicMock()
        secret_mock.secret = secret_dict

        provider = MagicMock()
        provider.provider = Provider.ProviderChoices.IAC.value
        provider.secret = secret_mock
        provider.uid = provider_uid

        result = get_prowler_provider_kwargs(provider)

        expected_result = {"scan_repository_url": provider_uid}
        assert result == expected_result

    def test_get_prowler_provider_kwargs_iac_provider_ignores_mutelist(self):
        """Test that IaC provider does NOT receive mutelist_content.

        IaC provider uses Trivy's built-in mutelist logic, so it should not
        receive mutelist_content even when a mutelist processor is configured.
        """
        provider_uid = "https://github.com/org/repo"
        secret_dict = {"access_token": "test_token"}
        secret_mock = MagicMock()
        secret_mock.secret = secret_dict

        mutelist_processor = MagicMock()
        mutelist_processor.configuration = {"Mutelist": {"key": "value"}}

        provider = MagicMock()
        provider.provider = Provider.ProviderChoices.IAC.value
        provider.secret = secret_mock
        provider.uid = provider_uid

        result = get_prowler_provider_kwargs(provider, mutelist_processor)

        # IaC provider should NOT have mutelist_content
        assert "mutelist_content" not in result
        expected_result = {
            "scan_repository_url": provider_uid,
            "oauth_app_token": "test_token",
        }
        assert result == expected_result

    def test_get_prowler_provider_kwargs_unsupported_provider(self):
        # Setup
        provider_uid = "provider_uid"

@@ -158,7 +158,8 @@ def get_prowler_provider_kwargs(

    if mutelist_processor:
        mutelist_content = mutelist_processor.configuration.get("Mutelist", {})
        if mutelist_content:
        # IaC provider doesn't support mutelist (uses Trivy's built-in logic)
        if mutelist_content and provider.provider != Provider.ProviderChoices.IAC.value:
            prowler_provider_kwargs["mutelist_content"] = mutelist_content

    return prowler_provider_kwargs

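Net effect of the guard above, mirroring the tests earlier in this diff:

# Non-IaC provider with a configured mutelist -> kwargs include the content:
#   {**secret, "mutelist_content": {"key": "value"}}
# IaC provider with the same processor -> mutelist_content is omitted, since
# the IaC scan relies on Trivy's built-in ignore handling instead:
#   {"scan_repository_url": provider.uid, "oauth_app_token": "..."}
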
@@ -381,10 +382,18 @@ def get_findings_metadata_no_aggregations(tenant_id: str, filtered_queryset):
    regions = sorted({region for region in aggregation["regions"] or [] if region})
    resource_types = sorted(set(aggregation["resource_types"] or []))

    # Aggregate categories from findings
    categories_set = set()
    for categories_list in filtered_queryset.values_list("categories", flat=True):
        if categories_list:
            categories_set.update(categories_list)
    categories = sorted(categories_set)

    result = {
        "services": services,
        "regions": regions,
        "resource_types": resource_types,
        "categories": categories,
    }

    serializer = FindingMetadataSerializer(data=result)

@@ -40,11 +40,16 @@ class BedrockCredentialsSerializer(serializers.Serializer):
    """
    Serializer for AWS Bedrock credentials validation.

    Validates long-term AWS credentials (AKIA) and region format.
    Supports two authentication methods:
    1. AWS access key + secret key
    2. Bedrock API key (bearer token)

    In both cases, region is mandatory.
    """

    access_key_id = serializers.CharField()
    secret_access_key = serializers.CharField()
    access_key_id = serializers.CharField(required=False, allow_blank=False)
    secret_access_key = serializers.CharField(required=False, allow_blank=False)
    api_key = serializers.CharField(required=False, allow_blank=False)
    region = serializers.CharField()

    def validate_access_key_id(self, value: str) -> str:
@@ -65,6 +70,15 @@ class BedrockCredentialsSerializer(serializers.Serializer):
            )
        return value

    def validate_api_key(self, value: str) -> str:
        """
        Validate Bedrock API key (bearer token).
        """
        pattern = r"^ABSKQmVkcm9ja0FQSUtleS[A-Za-z0-9+/=]{110}$"
        if not re.match(pattern, value or ""):
            raise serializers.ValidationError("Invalid Bedrock API key format.")
        return value

    def validate_region(self, value: str) -> str:
        """Validate AWS region format."""
        pattern = r"^[a-z]{2}-[a-z]+-\d+$"
@@ -74,6 +88,50 @@ class BedrockCredentialsSerializer(serializers.Serializer):
            )
        return value

    def validate(self, attrs):
        """
        Enforce either:
        - access_key_id + secret_access_key + region
        OR
        - api_key + region
        """
        access_key_id = attrs.get("access_key_id")
        secret_access_key = attrs.get("secret_access_key")
        api_key = attrs.get("api_key")
        region = attrs.get("region")

        errors = {}

        if not region:
            errors["region"] = ["Region is required."]

        using_access_keys = bool(access_key_id or secret_access_key)
        using_api_key = api_key is not None and api_key != ""

        if using_access_keys and using_api_key:
            errors["non_field_errors"] = [
                "Provide either access key + secret key OR api key, not both."
            ]
        elif not using_access_keys and not using_api_key:
            errors["non_field_errors"] = [
                "You must provide either access key + secret key OR api key."
            ]
        elif using_access_keys:
            # Both access_key_id and secret_access_key must be present together
            if not access_key_id:
                errors.setdefault("access_key_id", []).append(
                    "AWS access key ID is required when using access key authentication."
                )
            if not secret_access_key:
                errors.setdefault("secret_access_key", []).append(
                    "AWS secret access key is required when using access key authentication."
                )

        if errors:
            raise serializers.ValidationError(errors)

        return attrs

    def to_internal_value(self, data):
        """Check for unknown fields before DRF filters them out."""
        if not isinstance(data, dict):
@@ -111,6 +169,15 @@ class BedrockCredentialsUpdateSerializer(BedrockCredentialsSerializer):
        for field in self.fields.values():
            field.required = False

    def validate(self, attrs):
        """
        For updates, this serializer only checks individual fields.
        It does NOT enforce the "either access keys OR api key" rule.
        That rule is applied later, after merging with existing stored
        credentials, in LighthouseProviderConfigUpdateSerializer.
        """
        return attrs

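Illustrative payloads accepted by the combined rules above (all values are fake placeholders):

# IAM access key pair, region required:
#   {"access_key_id": "AKIAIOSFODNN7EXAMPLE",
#    "secret_access_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
#    "region": "us-east-1"}
# Bedrock API key (bearer token), region still required:
#   {"api_key": "ABSKQmVkcm9ja0FQSUtleS...", "region": "us-east-1"}
# Supplying both methods, or neither, is rejected with non_field_errors.
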
class OpenAICompatibleCredentialsSerializer(serializers.Serializer):
    """
@@ -168,27 +235,51 @@ class OpenAICompatibleCredentialsSerializer(serializers.Serializer):
        "required": ["api_key"],
    },
    {
        "type": "object",
        "title": "AWS Bedrock Credentials",
        "properties": {
            "access_key_id": {
                "type": "string",
                "description": "AWS access key ID.",
                "pattern": "^AKIA[0-9A-Z]{16}$",
        "oneOf": [
            {
                "title": "IAM Access Key Pair",
                "type": "object",
                "description": "Authenticate with AWS access key and secret key. Recommended when you manage IAM users or roles.",
                "properties": {
                    "access_key_id": {
                        "type": "string",
                        "description": "AWS access key ID.",
                        "pattern": "^AKIA[0-9A-Z]{16}$",
                    },
                    "secret_access_key": {
                        "type": "string",
                        "description": "AWS secret access key.",
                        "pattern": "^[A-Za-z0-9/+=]{40}$",
                    },
                    "region": {
                        "type": "string",
                        "description": "AWS region identifier where Bedrock is available. Examples: us-east-1, "
                        "us-west-2, eu-west-1, ap-northeast-1.",
                        "pattern": "^[a-z]{2}-[a-z]+-\\d+$",
                    },
                },
                "required": ["access_key_id", "secret_access_key", "region"],
            },
            "secret_access_key": {
                "type": "string",
                "description": "AWS secret access key.",
                "pattern": "^[A-Za-z0-9/+=]{40}$",
            {
                "title": "Amazon Bedrock API Key",
                "type": "object",
                "description": "Authenticate with an Amazon Bedrock API key (bearer token). Region is still required.",
                "properties": {
                    "api_key": {
                        "type": "string",
                        "description": "Amazon Bedrock API key (bearer token).",
                    },
                    "region": {
                        "type": "string",
                        "description": "AWS region identifier where Bedrock is available. Examples: us-east-1, "
                        "us-west-2, eu-west-1, ap-northeast-1.",
                        "pattern": "^[a-z]{2}-[a-z]+-\\d+$",
                    },
                },
                "required": ["api_key", "region"],
            },
            "region": {
                "type": "string",
                "description": "AWS region identifier where Bedrock is available. Examples: us-east-1, "
                "us-west-2, eu-west-1, ap-northeast-1.",
                "pattern": "^[a-z]{2}-[a-z]+-\\d+$",
            },
        },
        "required": ["access_key_id", "secret_access_key", "region"],
        ],
    },
    {
        "type": "object",

@@ -72,6 +72,42 @@ from api.v1.serializer_utils.processors import ProcessorConfigField
from api.v1.serializer_utils.providers import ProviderSecretField
from prowler.lib.mutelist.mutelist import Mutelist

# Base


class BaseModelSerializerV1(serializers.ModelSerializer):
    def get_root_meta(self, _resource, _many):
        return {"version": "v1"}


class BaseSerializerV1(serializers.Serializer):
    def get_root_meta(self, _resource, _many):
        return {"version": "v1"}


class BaseWriteSerializer(BaseModelSerializerV1):
    def validate(self, data):
        if hasattr(self, "initial_data"):
            initial_data = set(self.initial_data.keys()) - {"id", "type"}
            unknown_keys = initial_data - set(self.fields.keys())
            if unknown_keys:
                raise ValidationError(f"Invalid fields: {unknown_keys}")
        return data


class RLSSerializer(BaseModelSerializerV1):
    def create(self, validated_data):
        tenant_id = self.context.get("tenant_id")
        validated_data["tenant_id"] = tenant_id
        return super().create(validated_data)


class StateEnumSerializerField(serializers.ChoiceField):
    def __init__(self, **kwargs):
        kwargs["choices"] = StateChoices.choices
        super().__init__(**kwargs)


# Tokens

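Why two bases: BaseModelSerializerV1 extends DRF's ModelSerializer for model-backed resources, while BaseSerializerV1 extends the plain Serializer for computed payloads. Both only add the shared JSON:API root meta, which is what lets the per-class get_root_meta overrides further down be deleted.

# Sketch: any serializer inheriting either base now reports
#   {"meta": {"version": "v1"}, ...}
# without re-implementing get_root_meta.
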
@@ -179,7 +215,7 @@ class TokenSocialLoginSerializer(BaseTokenSerializer):


# TODO: Check if we can change the parent class to TokenRefreshSerializer from rest_framework_simplejwt.serializers
class TokenRefreshSerializer(serializers.Serializer):
class TokenRefreshSerializer(BaseSerializerV1):
    refresh = serializers.CharField()

    # Output token
@@ -213,7 +249,7 @@ class TokenRefreshSerializer(serializers.Serializer):
        raise ValidationError({"refresh": "Invalid or expired token"})


class TokenSwitchTenantSerializer(serializers.Serializer):
class TokenSwitchTenantSerializer(BaseSerializerV1):
    tenant_id = serializers.UUIDField(
        write_only=True, help_text="The tenant ID for which to request a new token."
    )
@@ -237,41 +273,10 @@ class TokenSwitchTenantSerializer(serializers.Serializer):
        return generate_tokens(user, tenant_id)


# Base


class BaseSerializerV1(serializers.ModelSerializer):
    def get_root_meta(self, _resource, _many):
        return {"version": "v1"}


class BaseWriteSerializer(BaseSerializerV1):
    def validate(self, data):
        if hasattr(self, "initial_data"):
            initial_data = set(self.initial_data.keys()) - {"id", "type"}
            unknown_keys = initial_data - set(self.fields.keys())
            if unknown_keys:
                raise ValidationError(f"Invalid fields: {unknown_keys}")
        return data


class RLSSerializer(BaseSerializerV1):
    def create(self, validated_data):
        tenant_id = self.context.get("tenant_id")
        validated_data["tenant_id"] = tenant_id
        return super().create(validated_data)


class StateEnumSerializerField(serializers.ChoiceField):
    def __init__(self, **kwargs):
        kwargs["choices"] = StateChoices.choices
        super().__init__(**kwargs)


# Users


class UserSerializer(BaseSerializerV1):
class UserSerializer(BaseModelSerializerV1):
    """
    Serializer for the User model.
    """

@@ -402,7 +407,7 @@ class UserUpdateSerializer(BaseWriteSerializer):
        return super().update(instance, validated_data)


class RoleResourceIdentifierSerializer(serializers.Serializer):
class RoleResourceIdentifierSerializer(BaseSerializerV1):
    resource_type = serializers.CharField(source="type")
    id = serializers.UUIDField()

@@ -585,7 +590,7 @@ class TaskSerializer(RLSSerializer, TaskBase):
# Tenants


class TenantSerializer(BaseSerializerV1):
class TenantSerializer(BaseModelSerializerV1):
    """
    Serializer for the Tenant model.
    """
@@ -597,7 +602,7 @@ class TenantSerializer(BaseSerializerV1):
        fields = ["id", "name", "memberships"]


class TenantIncludeSerializer(BaseSerializerV1):
class TenantIncludeSerializer(BaseModelSerializerV1):
    class Meta:
        model = Tenant
        fields = ["id", "name"]
@@ -773,7 +778,7 @@ class ProviderGroupUpdateSerializer(ProviderGroupSerializer):
        return super().update(instance, validated_data)


class ProviderResourceIdentifierSerializer(serializers.Serializer):
class ProviderResourceIdentifierSerializer(BaseSerializerV1):
    resource_type = serializers.CharField(source="type")
    id = serializers.UUIDField()

@@ -1110,7 +1115,7 @@ class ScanTaskSerializer(RLSSerializer):
    ]


class ScanReportSerializer(serializers.Serializer):
class ScanReportSerializer(BaseSerializerV1):
    id = serializers.CharField(source="scan")

    class Meta:
@@ -1118,7 +1123,7 @@ class ScanReportSerializer(serializers.Serializer):
        fields = ["id"]


class ScanComplianceReportSerializer(serializers.Serializer):
class ScanComplianceReportSerializer(BaseSerializerV1):
    id = serializers.CharField(source="scan")
    name = serializers.CharField()

@@ -1267,7 +1272,7 @@ class ResourceIncludeSerializer(RLSSerializer):
        return fields


class ResourceMetadataSerializer(serializers.Serializer):
class ResourceMetadataSerializer(BaseSerializerV1):
    services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    types = serializers.ListField(child=serializers.CharField(), allow_empty=True)
@@ -1296,6 +1301,7 @@ class FindingSerializer(RLSSerializer):
            "severity",
            "check_id",
            "check_metadata",
            "categories",
            "raw_result",
            "inserted_at",
            "updated_at",
@@ -1337,7 +1343,7 @@ class FindingIncludeSerializer(RLSSerializer):


# To be removed when the related endpoint is removed as well
class FindingDynamicFilterSerializer(serializers.Serializer):
class FindingDynamicFilterSerializer(BaseSerializerV1):
    services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)

@@ -1345,12 +1351,13 @@ class FindingDynamicFilterSerializer(serializers.Serializer):
        resource_name = "finding-dynamic-filters"


class FindingMetadataSerializer(serializers.Serializer):
class FindingMetadataSerializer(BaseSerializerV1):
    services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    resource_types = serializers.ListField(
        child=serializers.CharField(), allow_empty=True
    )
    categories = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    # Temporarily disabled until we implement tag filtering in the UI
    # tags = serializers.JSONField(help_text="Tags are described as key-value pairs.")

@@ -2039,7 +2046,7 @@ class RoleProviderGroupRelationshipSerializer(RLSSerializer, BaseWriteSerializer
|
||||
# Compliance overview
|
||||
|
||||
|
||||
class ComplianceOverviewSerializer(serializers.Serializer):
|
||||
class ComplianceOverviewSerializer(BaseSerializerV1):
|
||||
"""
|
||||
Serializer for compliance requirement status aggregated by compliance framework.
|
||||
|
||||
@@ -2061,7 +2068,7 @@ class ComplianceOverviewSerializer(serializers.Serializer):
|
||||
resource_name = "compliance-overviews"
|
||||
|
||||
|
||||
class ComplianceOverviewDetailSerializer(serializers.Serializer):
|
||||
class ComplianceOverviewDetailSerializer(BaseSerializerV1):
|
||||
"""
|
||||
Serializer for detailed compliance requirement information.
|
||||
|
||||
@@ -2090,7 +2097,7 @@ class ComplianceOverviewDetailThreatscoreSerializer(ComplianceOverviewDetailSeri
|
||||
total_findings = serializers.IntegerField()
|
||||
|
||||
|
||||
class ComplianceOverviewAttributesSerializer(serializers.Serializer):
|
||||
class ComplianceOverviewAttributesSerializer(BaseSerializerV1):
|
||||
id = serializers.CharField()
|
||||
compliance_name = serializers.CharField()
|
||||
framework_description = serializers.CharField()
|
||||
@@ -2104,7 +2111,7 @@ class ComplianceOverviewAttributesSerializer(serializers.Serializer):
|
||||
resource_name = "compliance-requirements-attributes"
|
||||
|
||||
|
||||
class ComplianceOverviewMetadataSerializer(serializers.Serializer):
|
||||
class ComplianceOverviewMetadataSerializer(BaseSerializerV1):
|
||||
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
|
||||
class JSONAPIMeta:
|
||||
@@ -2114,7 +2121,7 @@ class ComplianceOverviewMetadataSerializer(serializers.Serializer):
|
||||
# Overviews
|
||||
|
||||
|
||||
class OverviewProviderSerializer(serializers.Serializer):
|
||||
class OverviewProviderSerializer(BaseSerializerV1):
|
||||
id = serializers.CharField(source="provider")
|
||||
findings = serializers.SerializerMethodField(read_only=True)
|
||||
resources = serializers.SerializerMethodField(read_only=True)
|
||||
@@ -2122,9 +2129,6 @@ class OverviewProviderSerializer(serializers.Serializer):
|
||||
class JSONAPIMeta:
|
||||
resource_name = "providers-overview"
|
||||
|
||||
def get_root_meta(self, _resource, _many):
|
||||
return {"version": "v1"}
|
||||
|
||||
@extend_schema_field(
|
||||
{
|
||||
"type": "object",
|
||||
@@ -2158,18 +2162,15 @@ class OverviewProviderSerializer(serializers.Serializer):
|
||||
}
|
||||
|
||||
|
||||
class OverviewProviderCountSerializer(serializers.Serializer):
|
||||
class OverviewProviderCountSerializer(BaseSerializerV1):
|
||||
id = serializers.CharField(source="provider")
|
||||
count = serializers.IntegerField()
|
||||
|
||||
class JSONAPIMeta:
|
||||
resource_name = "providers-count-overview"
|
||||
|
||||
def get_root_meta(self, _resource, _many):
|
||||
return {"version": "v1"}
|
||||
|
||||
|
||||
class OverviewFindingSerializer(serializers.Serializer):
|
||||
class OverviewFindingSerializer(BaseSerializerV1):
|
||||
id = serializers.CharField(default="n/a")
|
||||
new = serializers.IntegerField()
|
||||
changed = serializers.IntegerField()
|
||||
@@ -2188,15 +2189,12 @@ class OverviewFindingSerializer(serializers.Serializer):
    class JSONAPIMeta:
        resource_name = "findings-overview"

-    def get_root_meta(self, _resource, _many):
-        return {"version": "v1"}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["pass"] = self.fields.pop("_pass")


-class OverviewSeveritySerializer(serializers.Serializer):
+class OverviewSeveritySerializer(BaseSerializerV1):
    id = serializers.CharField(default="n/a")
    critical = serializers.IntegerField()
    high = serializers.IntegerField()
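Note: the `self.fields["pass"] = self.fields.pop("_pass")` re-keying above exists because `pass` is a reserved word in Python and cannot be declared as a class attribute. A minimal standalone sketch of the same trick (class and field names here are illustrative, not from the codebase):

```python
from rest_framework import serializers


class StatusCountSerializer(serializers.Serializer):  # hypothetical example
    total = serializers.IntegerField()
    _pass = serializers.IntegerField()
    fail = serializers.IntegerField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Re-key the declared field so the serialized output uses "pass";
        # the field keeps source="_pass" from its original binding.
        self.fields["pass"] = self.fields.pop("_pass")


print(StatusCountSerializer({"total": 3, "_pass": 2, "fail": 1}).data)
# {'total': 3, 'fail': 1, 'pass': 2}
```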
@@ -2207,11 +2205,24 @@ class OverviewSeveritySerializer(serializers.Serializer):
    class JSONAPIMeta:
        resource_name = "findings-severity-overview"

-    def get_root_meta(self, _resource, _many):
-        return {"version": "v1"}

+class FindingsSeverityOverTimeSerializer(BaseSerializerV1):
+    """Serializer for daily findings severity trend data."""
+
+    id = serializers.DateField(source="date")
+    critical = serializers.IntegerField()
+    high = serializers.IntegerField()
+    medium = serializers.IntegerField()
+    low = serializers.IntegerField()
+    informational = serializers.IntegerField()
+    muted = serializers.IntegerField()
+    scan_ids = serializers.ListField(child=serializers.UUIDField())
+
+    class JSONAPIMeta:
+        resource_name = "findings-severity-over-time"


-class OverviewServiceSerializer(serializers.Serializer):
+class OverviewServiceSerializer(BaseSerializerV1):
    id = serializers.CharField(source="service")
    total = serializers.IntegerField()
    _pass = serializers.IntegerField()
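Note: every serializer migrated here drops its own `get_root_meta` while switching its base class, which suggests `BaseSerializerV1` now centralizes the root `meta` payload. A plausible sketch of such a base class (assumed, it is not shown in this diff):

```python
from rest_framework import serializers


class BaseSerializerV1(serializers.Serializer):  # assumed shape, not shown in this diff
    """Base serializer that stamps every JSON:API response with the API version."""

    def get_root_meta(self, _resource, _many):
        # Matches the per-class implementations removed throughout this file
        return {"version": "v1"}
```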
@@ -2225,8 +2236,32 @@ class OverviewServiceSerializer(serializers.Serializer):
        super().__init__(*args, **kwargs)
        self.fields["pass"] = self.fields.pop("_pass")

-    def get_root_meta(self, _resource, _many):
-        return {"version": "v1"}
+
+class AttackSurfaceOverviewSerializer(BaseSerializerV1):
+    """Serializer for attack surface overview aggregations."""
+
+    id = serializers.CharField(source="attack_surface_type")
+    total_findings = serializers.IntegerField()
+    failed_findings = serializers.IntegerField()
+    muted_failed_findings = serializers.IntegerField()
+
+    class JSONAPIMeta:
+        resource_name = "attack-surface-overviews"
+
+
+class CategoryOverviewSerializer(BaseSerializerV1):
+    """Serializer for category overview aggregations."""
+
+    id = serializers.CharField(source="category")
+    total_findings = serializers.IntegerField()
+    failed_findings = serializers.IntegerField()
+    new_failed_findings = serializers.IntegerField()
+    severity = serializers.JSONField(
+        help_text="Severity breakdown: {informational, low, medium, high, critical}"
+    )
+
+    class JSONAPIMeta:
+        resource_name = "category-overviews"


class OverviewRegionSerializer(serializers.Serializer):
@@ -2256,7 +2291,7 @@ class OverviewRegionSerializer(serializers.Serializer):
# Schedules


-class ScheduleDailyCreateSerializer(serializers.Serializer):
+class ScheduleDailyCreateSerializer(BaseSerializerV1):
    provider_id = serializers.UUIDField(required=True)

    class JSONAPIMeta:
@@ -2592,7 +2627,7 @@ class IntegrationUpdateSerializer(BaseWriteIntegrationSerializer):
        return representation


-class IntegrationJiraDispatchSerializer(serializers.Serializer):
+class IntegrationJiraDispatchSerializer(BaseSerializerV1):
    """
    Serializer for dispatching findings to JIRA integration.
    """
@@ -2755,14 +2790,14 @@ class ProcessorUpdateSerializer(BaseWriteSerializer):
# SSO


-class SamlInitiateSerializer(serializers.Serializer):
+class SamlInitiateSerializer(BaseSerializerV1):
    email_domain = serializers.CharField()

    class JSONAPIMeta:
        resource_name = "saml-initiate"


-class SamlMetadataSerializer(serializers.Serializer):
+class SamlMetadataSerializer(BaseSerializerV1):
    class JSONAPIMeta:
        resource_name = "saml-meta"

@@ -3294,6 +3329,19 @@ class LighthouseProviderConfigUpdateSerializer(BaseWriteSerializer):
            and provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK
        ):
+            # For updates, enforce that the authentication method (access keys vs API key)
+            # is immutable. To switch methods, the UI must delete and recreate the provider.
+            existing_credentials = (
+                self.instance.credentials_decoded if self.instance else {}
+            ) or {}
+
+            existing_uses_api_key = "api_key" in existing_credentials
+            existing_uses_access_keys = any(
+                k in existing_credentials
+                for k in ("access_key_id", "secret_access_key")
+            )
+
+            # First run field-level validation on the partial payload
            try:
                BedrockCredentialsUpdateSerializer(data=credentials).is_valid(
                    raise_exception=True
@@ -3304,6 +3352,31 @@ class LighthouseProviderConfigUpdateSerializer(BaseWriteSerializer):
                    e.detail[f"credentials/{key}"] = value
                    del e.detail[key]
                raise e
+
+            # Then enforce invariants about not changing the auth method
+            # If the existing config uses an API key, forbid introducing access keys.
+            if existing_uses_api_key and any(
+                k in credentials for k in ("access_key_id", "secret_access_key")
+            ):
+                raise ValidationError(
+                    {
+                        "credentials/non_field_errors": [
+                            "Cannot change Bedrock authentication method from API key "
+                            "to access key via update. Delete and recreate the provider instead."
+                        ]
+                    }
+                )
+
+            # If the existing config uses access keys, forbid introducing an API key.
+            if existing_uses_access_keys and "api_key" in credentials:
+                raise ValidationError(
+                    {
+                        "credentials/non_field_errors": [
+                            "Cannot change Bedrock authentication method from access key "
+                            "to API key via update. Delete and recreate the provider instead."
+                        ]
+                    }
+                )
        elif (
            credentials is not None
            and provider_type

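Note: under the validation above, a PATCH that tries to switch a Bedrock configuration from one auth method to the other is rejected. An illustrative (hypothetical) payload and the resulting error shape:

```python
# Hypothetical update against a config that currently stores an api_key
payload = {"credentials": {"access_key_id": "AKIA...", "secret_access_key": "..."}}

# The serializer raises ValidationError with:
# {
#     "credentials/non_field_errors": [
#         "Cannot change Bedrock authentication method from API key "
#         "to access key via update. Delete and recreate the provider instead."
#     ]
# }
```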
@@ -98,8 +98,11 @@ from api.db_router import MainRouter
from api.db_utils import rls_transaction
from api.exceptions import TaskFailedException
from api.filters import (
+    AttackSurfaceOverviewFilter,
+    CategoryOverviewFilter,
    ComplianceOverviewFilter,
    CustomDjangoFilterBackend,
+    DailySeveritySummaryFilter,
    FindingFilter,
    IntegrationFilter,
    IntegrationJiraFindingsFilter,
@@ -126,8 +129,10 @@ from api.filters import (
    UserFilter,
)
from api.models import (
+    AttackSurfaceOverview,
    ComplianceOverviewSummary,
    ComplianceRequirementOverview,
+    DailySeveritySummary,
    Finding,
    Integration,
    Invitation,
@@ -152,6 +157,7 @@ from api.models import (
    SAMLDomainIndex,
    SAMLToken,
    Scan,
+    ScanCategorySummary,
    ScanSummary,
    SeverityChoices,
    StateChoices,
@@ -172,6 +178,8 @@ from api.utils import (
from api.uuid_utils import datetime_to_uuid7, uuid7_start
from api.v1.mixins import DisablePaginationMixin, PaginateByPkMixin, TaskManagementMixin
from api.v1.serializers import (
+    AttackSurfaceOverviewSerializer,
+    CategoryOverviewSerializer,
    ComplianceOverviewAttributesSerializer,
    ComplianceOverviewDetailSerializer,
    ComplianceOverviewDetailThreatscoreSerializer,
@@ -180,6 +188,7 @@ from api.v1.serializers import (
    FindingDynamicFilterSerializer,
    FindingMetadataSerializer,
    FindingSerializer,
+    FindingsSeverityOverTimeSerializer,
    IntegrationCreateSerializer,
    IntegrationJiraDispatchSerializer,
    IntegrationSerializer,
@@ -350,7 +359,7 @@ class SchemaView(SpectacularAPIView):

    def get(self, request, *args, **kwargs):
        spectacular_settings.TITLE = "Prowler API"
-        spectacular_settings.VERSION = "1.15.0"
+        spectacular_settings.VERSION = "1.17.0"
        spectacular_settings.DESCRIPTION = (
            "Prowler API specification.\n\nThis file is auto-generated."
        )
@@ -2753,12 +2762,15 @@ class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):

        queryset = ResourceScanSummary.objects.filter(tenant_id=tenant_id)
        scan_based_filters = {}
+        category_scan_filters = {}  # Filters for ScanCategorySummary

        if scans := query_params.get("filter[scan__in]") or query_params.get(
            "filter[scan]"
        ):
-            queryset = queryset.filter(scan_id__in=scans.split(","))
-            scan_based_filters = {"id__in": scans.split(",")}
+            scan_ids_list = scans.split(",")
+            queryset = queryset.filter(scan_id__in=scan_ids_list)
+            scan_based_filters = {"id__in": scan_ids_list}
+            category_scan_filters = {"scan_id__in": scan_ids_list}
        else:
            exact = query_params.get("filter[inserted_at]")
            gte = query_params.get("filter[inserted_at__gte]")
@@ -2802,6 +2814,7 @@ class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):
            scan_based_filters = {
                key.lstrip("scan_"): value for key, value in date_filters.items()
            }
+            category_scan_filters = date_filters

        # ToRemove: Temporary fallback mechanism
        if not queryset.exists():
@@ -2848,10 +2861,31 @@ class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):
            .order_by("resource_type")
        )

+        # Get categories from ScanCategorySummary using same scan filters
+        categories = list(
+            ScanCategorySummary.objects.filter(
+                tenant_id=tenant_id, **category_scan_filters
+            )
+            .values_list("category", flat=True)
+            .distinct()
+            .order_by("category")
+        )
+
+        # Fallback to finding aggregation if no ScanCategorySummary exists
+        if not categories:
+            categories_set = set()
+            for categories_list in filtered_queryset.values_list(
+                "categories", flat=True
+            ):
+                if categories_list:
+                    categories_set.update(categories_list)
+            categories = sorted(categories_set)
+
        result = {
            "services": services,
            "regions": regions,
            "resource_types": resource_types,
+            "categories": categories,
        }

        serializer = self.get_serializer(data=result)
@@ -2956,10 +2990,36 @@ class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):
            .order_by("resource_type")
        )

+        # Get categories from ScanCategorySummary for latest scans
+        categories = list(
+            ScanCategorySummary.objects.filter(
+                tenant_id=tenant_id,
+                scan_id__in=latest_scans_queryset.values_list("id", flat=True),
+            )
+            .values_list("category", flat=True)
+            .distinct()
+            .order_by("category")
+        )
+
+        # Fallback to finding aggregation if no ScanCategorySummary exists
+        if not categories:
+            filtered_queryset = self.filter_queryset(self.get_queryset()).filter(
+                tenant_id=tenant_id,
+                scan_id__in=latest_scans_queryset.values_list("id", flat=True),
+            )
+            categories_set = set()
+            for categories_list in filtered_queryset.values_list(
+                "categories", flat=True
+            ):
+                if categories_list:
+                    categories_set.update(categories_list)
+            categories = sorted(categories_set)
+
        result = {
            "services": services,
            "regions": regions,
            "resource_types": resource_types,
+            "categories": categories,
        }

        serializer = self.get_serializer(data=result)
@@ -3359,50 +3419,15 @@ class RoleProviderGroupRelationshipView(RelationshipView, BaseRLSViewSet):
@extend_schema_view(
    list=extend_schema(
        tags=["Compliance Overview"],
-        summary="List compliance overviews",
-        description=(
-            "Retrieve an overview of all compliance frameworks. "
-            "If scan_id is provided, returns compliance data for that specific scan. "
-            "If scan_id is omitted, returns compliance data aggregated from the latest completed scan of each provider."
-        ),
+        summary="List compliance overviews for a scan",
+        description="Retrieve an overview of all the compliance in a given scan.",
        parameters=[
            OpenApiParameter(
                name="filter[scan_id]",
-                required=False,
+                required=True,
                type=OpenApiTypes.UUID,
                location=OpenApiParameter.QUERY,
-                description=(
-                    "Optional scan ID. If provided, returns compliance for that scan. "
-                    "If omitted, returns compliance for the latest completed scan per provider."
-                ),
-            ),
-            OpenApiParameter(
-                name="filter[provider_id]",
-                required=False,
-                type=OpenApiTypes.UUID,
-                location=OpenApiParameter.QUERY,
-                description="Filter by specific provider ID.",
-            ),
-            OpenApiParameter(
-                name="filter[provider_id__in]",
-                required=False,
-                type={"type": "array", "items": {"type": "string", "format": "uuid"}},
-                location=OpenApiParameter.QUERY,
-                description="Filter by multiple provider IDs (comma-separated).",
-            ),
-            OpenApiParameter(
-                name="filter[provider_type]",
-                required=False,
-                type=OpenApiTypes.STR,
-                location=OpenApiParameter.QUERY,
-                description="Filter by provider type (e.g., aws, azure, gcp).",
-            ),
-            OpenApiParameter(
-                name="filter[provider_type__in]",
-                required=False,
-                type={"type": "array", "items": {"type": "string"}},
-                location=OpenApiParameter.QUERY,
-                description="Filter by multiple provider types (comma-separated).",
+                description="Related scan ID.",
            ),
        ],
        responses={
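Note: with `filter[scan_id]` now required, a client request looks like the following sketch (host and path are assumed from the JSON:API conventions used elsewhere, not stated in this diff):

```python
import requests  # illustrative only

response = requests.get(
    "https://api.example.com/api/v1/compliance-overviews",  # assumed base URL/path
    params={"filter[scan_id]": "018f2b3c-aaaa-bbbb-cccc-0123456789ab"},  # invented UUID
    headers={"Authorization": "Bearer <access_token>"},
)
response.raise_for_status()
```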
@@ -3581,7 +3606,19 @@ class ComplianceOverviewViewSet(BaseRLSViewSet, TaskManagementMixin):
    def _get_compliance_template(self, *, provider=None, scan_id=None):
        """Return the compliance template for the given provider or scan."""
        if provider is None and scan_id is not None:
-            scan = Scan.all_objects.select_related("provider").get(pk=scan_id)
+            try:
+                scan = Scan.all_objects.select_related("provider").get(pk=scan_id)
+            except Scan.DoesNotExist:
+                raise ValidationError(
+                    [
+                        {
+                            "detail": "Scan not found",
+                            "status": 404,
+                            "source": {"pointer": "filter[scan_id]"},
+                            "code": "not_found",
+                        }
+                    ]
+                )
            provider = scan.provider

        if not provider:
@@ -3723,93 +3760,47 @@ class ComplianceOverviewViewSet(BaseRLSViewSet, TaskManagementMixin):

    def list(self, request, *args, **kwargs):
        scan_id = request.query_params.get("filter[scan_id]")
-        tenant_id = self.request.tenant_id

-        if scan_id:
-            # Specific scan requested - use optimized summaries with region support
-            region_filter = request.query_params.get(
-                "filter[region]"
-            ) or request.query_params.get("filter[region__in]")
+        # Specific scan requested - use optimized summaries with region support
+        region_filter = request.query_params.get(
+            "filter[region]"
+        ) or request.query_params.get("filter[region__in]")

-            if region_filter:
-                # Fall back to detailed query with region filtering
-                return self._list_with_region_filter(scan_id, region_filter)
+        if region_filter:
+            # Fall back to detailed query with region filtering
+            return self._list_with_region_filter(scan_id, region_filter)

-            summaries = list(self._compliance_summaries_queryset(scan_id))
-            if not summaries:
-                # Trigger async backfill for next time
-                backfill_compliance_summaries_task.delay(
-                    tenant_id=self.request.tenant_id, scan_id=scan_id
-                )
-                # Use fallback aggregation for this request
-                return self._list_without_region_aggregation(scan_id)
+        summaries = list(self._compliance_summaries_queryset(scan_id))
+        if not summaries:
+            # Trigger async backfill for next time
+            backfill_compliance_summaries_task.delay(
+                tenant_id=self.request.tenant_id, scan_id=scan_id
+            )
+            # Use fallback aggregation for this request
+            return self._list_without_region_aggregation(scan_id)

-            # Get compliance template for provider to enrich with framework/version
-            compliance_template = self._get_compliance_template(scan_id=scan_id)
+        # Get compliance template for provider to enrich with framework/version
+        compliance_template = self._get_compliance_template(scan_id=scan_id)

-            # Convert to response format with framework/version enrichment
-            response_data = []
-            for summary in summaries:
-                compliance_metadata = compliance_template.get(summary.compliance_id, {})
-                response_data.append(
-                    {
-                        "id": summary.compliance_id,
-                        "compliance_id": summary.compliance_id,
-                        "framework": compliance_metadata.get("framework", ""),
-                        "version": compliance_metadata.get("version", ""),
-                        "requirements_passed": summary.requirements_passed,
-                        "requirements_failed": summary.requirements_failed,
-                        "requirements_manual": summary.requirements_manual,
-                        "total_requirements": summary.total_requirements,
-                    }
-                )
-
-            serializer = self.get_serializer(response_data, many=True)
-            return Response(serializer.data)
-        else:
-            # No scan_id provided - use latest scans per provider
-            # First, check if provider filters are present
-            provider_id = request.query_params.get("filter[provider_id]")
-            provider_id__in = request.query_params.get("filter[provider_id__in]")
-            provider_type = request.query_params.get("filter[provider_type]")
-            provider_type__in = request.query_params.get("filter[provider_type__in]")
-
-            scan_filters = {"tenant_id": tenant_id, "state": StateChoices.COMPLETED}
-
-            # Apply provider ID filters
-            if provider_id:
-                scan_filters["provider_id"] = provider_id
-            elif provider_id__in:
-                # Convert comma-separated string to list
-                provider_ids = [pid.strip() for pid in provider_id__in.split(",")]
-                scan_filters["provider_id__in"] = provider_ids
-
-            # Apply provider type filters
-            if provider_type:
-                scan_filters["provider__provider"] = provider_type
-            elif provider_type__in:
-                # Convert comma-separated string to list
-                provider_types = [pt.strip() for pt in provider_type__in.split(",")]
-                scan_filters["provider__provider__in"] = provider_types
-
-            latest_scan_ids = (
-                Scan.all_objects.filter(**scan_filters)
-                .order_by("provider_id", "-inserted_at")
-                .distinct("provider_id")
-                .values_list("id", flat=True)
+        # Convert to response format with framework/version enrichment
+        response_data = []
+        for summary in summaries:
+            compliance_metadata = compliance_template.get(summary.compliance_id, {})
+            response_data.append(
+                {
+                    "id": summary.compliance_id,
+                    "compliance_id": summary.compliance_id,
+                    "framework": compliance_metadata.get("framework", ""),
+                    "version": compliance_metadata.get("version", ""),
+                    "requirements_passed": summary.requirements_passed,
+                    "requirements_failed": summary.requirements_failed,
+                    "requirements_manual": summary.requirements_manual,
+                    "total_requirements": summary.total_requirements,
+                }
            )

-            base_queryset = self.get_queryset()
-            queryset = self.filter_queryset(
-                base_queryset.filter(scan_id__in=latest_scan_ids)
-            )
-
-            # Aggregate compliance data across latest scans
-            compliance_template = self._get_compliance_template()
-            data = self._aggregate_compliance_overview(
-                queryset, template_metadata=compliance_template
-            )
-            return Response(data)
+        serializer = self.get_serializer(response_data, many=True)
+        return Response(serializer.data)

    @action(detail=False, methods=["get"], url_name="metadata")
    def metadata(self, request):
@@ -4074,6 +4065,34 @@ class ComplianceOverviewViewSet(BaseRLSViewSet, TaskManagementMixin):
        ),
        filters=True,
    ),
+    findings_severity_timeseries=extend_schema(
+        summary="Get findings severity data over time",
+        description=(
+            "Retrieve daily aggregated findings data grouped by severity levels over a date range. "
+            "Returns one data point per day with counts of failed findings by severity (critical, high, "
+            "medium, low, informational) and muted findings. Days without scans are filled forward with "
+            "the most recent known values. Use date_from (required) and date_to filters to specify the range."
+        ),
+        filters=True,
+    ),
+    attack_surface=extend_schema(
+        summary="Get attack surface overview",
+        description="Retrieve aggregated attack surface metrics from latest completed scans per provider.",
+        tags=["Overview"],
+        filters=True,
+        responses={200: AttackSurfaceOverviewSerializer(many=True)},
+    ),
+    categories=extend_schema(
+        summary="Get category overview",
+        description=(
+            "Retrieve aggregated category metrics from latest completed scans per provider. "
+            "Returns one row per category with total, failed, and new failed findings counts, "
+            "plus a severity breakdown showing failed findings per severity level. "
+        ),
+        tags=["Overview"],
+        filters=True,
+        responses={200: CategoryOverviewSerializer(many=True)},
+    ),
)
@method_decorator(CACHE_DECORATOR, name="list")
class OverviewViewSet(BaseRLSViewSet):
@@ -4091,7 +4110,16 @@ class OverviewViewSet(BaseRLSViewSet):
        if not role.unlimited_visibility:
            self.allowed_providers = providers

-        return ScanSummary.all_objects.filter(tenant_id=self.request.tenant_id)
+        tenant_id = self.request.tenant_id
+
+        # Return appropriate queryset per action
+        if self.action == "findings_severity_timeseries":
+            qs = DailySeveritySummary.objects.filter(tenant_id=tenant_id)
+            if hasattr(self, "allowed_providers"):
+                qs = qs.filter(provider_id__in=self.allowed_providers)
+            return qs
+
+        return ScanSummary.all_objects.filter(tenant_id=tenant_id)

    def get_serializer_class(self):
        if self.action == "providers":
@@ -4102,12 +4130,18 @@ class OverviewViewSet(BaseRLSViewSet):
            return OverviewFindingSerializer
        elif self.action == "findings_severity":
            return OverviewSeveritySerializer
+        elif self.action == "findings_severity_timeseries":
+            return FindingsSeverityOverTimeSerializer
        elif self.action == "services":
            return OverviewServiceSerializer
        elif self.action == "regions":
            return OverviewRegionSerializer
        elif self.action == "threatscore":
            return ThreatScoreSnapshotSerializer
+        elif self.action == "attack_surface":
+            return AttackSurfaceOverviewSerializer
+        elif self.action == "categories":
+            return CategoryOverviewSerializer
        return super().get_serializer_class()

    def get_filterset_class(self):
@@ -4117,8 +4151,22 @@ class OverviewViewSet(BaseRLSViewSet):
            return ScanSummaryFilter
        elif self.action == "findings_severity":
            return ScanSummarySeverityFilter
+        elif self.action == "findings_severity_timeseries":
+            return DailySeveritySummaryFilter
+        elif self.action == "categories":
+            return CategoryOverviewFilter
+        elif self.action == "attack_surface":
+            return AttackSurfaceOverviewFilter
        return None

+    def filter_queryset(self, queryset):
+        # Skip OrderingFilter for findings_severity_timeseries (no inserted_at field)
+        if self.action == "findings_severity_timeseries":
+            return CustomDjangoFilterBackend().filter_queryset(
+                self.request, queryset, self
+            )
+        return super().filter_queryset(queryset)
+
    @extend_schema(exclude=True)
    def list(self, request, *args, **kwargs):
        raise MethodNotAllowed(method="GET")
@@ -4156,6 +4204,80 @@ class OverviewViewSet(BaseRLSViewSet):
            tenant_id=tenant_id, scan_id__in=latest_scan_ids
        )

+    def _normalize_jsonapi_params(self, query_params, exclude_keys=None):
+        """Convert JSON:API filter params (filter[X]) to flat params (X)."""
+        exclude_keys = exclude_keys or set()
+        normalized = QueryDict(mutable=True)
+        for key, values in query_params.lists():
+            normalized_key = (
+                key[7:-1] if key.startswith("filter[") and key.endswith("]") else key
+            )
+            if normalized_key not in exclude_keys:
+                normalized.setlist(normalized_key, values)
+        return normalized
+
+    def _ensure_allowed_providers(self):
+        """Populate allowed providers for RBAC-aware queries once per request."""
+        if getattr(self, "_providers_initialized", False):
+            return
+        self.get_queryset()
+        self._providers_initialized = True
+
+    def _get_provider_filter(self, provider_field="provider"):
+        self._ensure_allowed_providers()
+        if hasattr(self, "allowed_providers"):
+            return {f"{provider_field}__in": self.allowed_providers}
+        return {}
+
+    def _apply_provider_filter(self, queryset, provider_field="provider"):
+        provider_filter = self._get_provider_filter(provider_field)
+        if provider_filter:
+            return queryset.filter(**provider_filter)
+        return queryset
+
+    def _apply_filterset(self, queryset, filterset_class, exclude_keys=None):
+        normalized_params = self._normalize_jsonapi_params(
+            self.request.query_params, exclude_keys=set(exclude_keys or [])
+        )
+        filterset = filterset_class(normalized_params, queryset=queryset)
+        return filterset.qs
+
+    def _latest_scan_ids_for_allowed_providers(self, tenant_id, provider_filters=None):
+        provider_filter = self._get_provider_filter()
+        queryset = Scan.all_objects.filter(
+            tenant_id=tenant_id, state=StateChoices.COMPLETED, **provider_filter
+        )
+        if provider_filters:
+            queryset = queryset.filter(**provider_filters)
+        return (
+            queryset.order_by("provider_id", "-inserted_at")
+            .distinct("provider_id")
+            .values_list("id", flat=True)
+        )
+
+    def _extract_provider_filters_from_params(self):
+        """Extract provider filters from query params to apply on Scan queryset."""
+        params = self.request.query_params
+        filters = {}
+
+        provider_id = params.get("filter[provider_id]")
+        if provider_id:
+            filters["provider_id"] = provider_id
+
+        provider_id_in = params.get("filter[provider_id__in]")
+        if provider_id_in:
+            filters["provider_id__in"] = provider_id_in.split(",")
+
+        provider_type = params.get("filter[provider_type]")
+        if provider_type:
+            filters["provider__provider"] = provider_type
+
+        provider_type_in = params.get("filter[provider_type__in]")
+        if provider_type_in:
+            filters["provider__provider__in"] = provider_type_in.split(",")
+
+        return filters
+
    @action(detail=False, methods=["get"], url_name="providers")
    def providers(self, request):
        tenant_id = self.request.tenant_id
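Note: a quick sketch of what `_normalize_jsonapi_params` does to an incoming query string (standalone re-implementation for illustration, runnable in a Django shell):

```python
from django.http import QueryDict

params = QueryDict("filter[provider_type]=aws&filter[severity__in]=high&page[size]=10")


def normalize(query_params, exclude_keys=()):
    out = QueryDict(mutable=True)
    for key, values in query_params.lists():
        # Strip the JSON:API wrapper: "filter[provider_type]" -> "provider_type"
        k = key[7:-1] if key.startswith("filter[") and key.endswith("]") else key
        if k not in exclude_keys:
            out.setlist(k, values)
    return out


print(normalize(params))
# <QueryDict: {'provider_type': ['aws'], 'severity__in': ['high'], 'page[size]': ['10']}>
```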
@@ -4333,6 +4455,108 @@ class OverviewViewSet(BaseRLSViewSet):

        return Response(serializer.data, status=status.HTTP_200_OK)

+    @action(
+        detail=False,
+        methods=["get"],
+        url_path="findings_severity/timeseries",
+        url_name="findings_severity_timeseries",
+    )
+    def findings_severity_timeseries(self, request):
+        """
+        Daily severity trends for charts. Uses DailySeveritySummary pre-aggregation.
+        Requires date_from filter.
+        """
+        # Get queryset with RBAC, provider, and date filters applied
+        # Date validation is handled by DailySeveritySummaryFilter
+        daily_qs = self.filter_queryset(self.get_queryset())
+
+        date_from = request._date_from
+        date_to = request._date_to
+
+        if not daily_qs.exists():
+            # No data matches filters - return zeros
+            result = self._generate_zero_result(date_from, date_to)
+            serializer = self.get_serializer(result, many=True)
+            return Response(serializer.data, status=status.HTTP_200_OK)
+
+        # Fetch all data for fill-forward logic
+        daily_summaries = list(
+            daily_qs.order_by("provider_id", "-date").values(
+                "provider_id",
+                "scan_id",
+                "date",
+                "critical",
+                "high",
+                "medium",
+                "low",
+                "informational",
+                "muted",
+            )
+        )
+
+        if not daily_summaries:
+            result = self._generate_zero_result(date_from, date_to)
+            serializer = self.get_serializer(result, many=True)
+            return Response(serializer.data, status=status.HTTP_200_OK)
+
+        # Build provider_data: {provider_id: [(date, data), ...]} sorted by date desc
+        provider_data = defaultdict(list)
+        for summary in daily_summaries:
+            provider_data[summary["provider_id"]].append(summary)
+
+        # For each day, find the latest data per provider and sum values
+        result = []
+        current_date = date_from
+        while current_date <= date_to:
+            day_totals = {
+                "critical": 0,
+                "high": 0,
+                "medium": 0,
+                "low": 0,
+                "informational": 0,
+                "muted": 0,
+            }
+            day_scan_ids = []
+
+            for provider_id, summaries in provider_data.items():
+                # Find the latest data for this provider <= current_date
+                for summary in summaries:  # Already sorted by date desc
+                    if summary["date"] <= current_date:
+                        day_totals["critical"] += summary["critical"] or 0
+                        day_totals["high"] += summary["high"] or 0
+                        day_totals["medium"] += summary["medium"] or 0
+                        day_totals["low"] += summary["low"] or 0
+                        day_totals["informational"] += summary["informational"] or 0
+                        day_totals["muted"] += summary["muted"] or 0
+                        day_scan_ids.append(summary["scan_id"])
+                        break  # Found the latest data for this provider
+
+            result.append(
+                {"date": current_date, "scan_ids": day_scan_ids, **day_totals}
+            )
+            current_date += timedelta(days=1)
+
+        serializer = self.get_serializer(result, many=True)
+        return Response(serializer.data, status=status.HTTP_200_OK)
+
+    def _generate_zero_result(self, date_from, date_to):
+        """Generate a list of zero-filled results for each date in range."""
+        result = []
+        current_date = date_from
+        zero_values = {
+            "critical": 0,
+            "high": 0,
+            "medium": 0,
+            "low": 0,
+            "informational": 0,
+            "muted": 0,
+            "scan_ids": [],
+        }
+        while current_date <= date_to:
+            result.append({"date": current_date, **zero_values})
+            current_date += timedelta(days=1)
+        return result
+
    @extend_schema(
        summary="Get ThreatScore snapshots",
        description=(
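Note: the fill-forward behavior above can be seen in isolation with plain data (a simplified single-provider re-implementation, not the view itself):

```python
from datetime import date, timedelta

# One provider scanned on the 1st and the 4th; gap days reuse the last known values.
summaries = [  # sorted by date descending, as in the view
    {"date": date(2025, 1, 4), "critical": 2},
    {"date": date(2025, 1, 1), "critical": 5},
]

day = date(2025, 1, 1)
while day <= date(2025, 1, 5):
    # Take the most recent summary at or before `day` (fill forward)
    latest = next((s for s in summaries if s["date"] <= day), None)
    print(day, latest["critical"] if latest else 0)
    day += timedelta(days=1)
# 2025-01-01..03 print 5 (filled forward); 2025-01-04 and 05 print 2
```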
@@ -4385,11 +4609,9 @@ class OverviewViewSet(BaseRLSViewSet):
        snapshot_id = request.query_params.get("snapshot_id")

        # Base queryset with RLS
-        base_queryset = ThreatScoreSnapshot.objects.filter(tenant_id=tenant_id)
-
-        # Apply RBAC filtering
-        if hasattr(self, "allowed_providers"):
-            base_queryset = base_queryset.filter(provider__in=self.allowed_providers)
+        base_queryset = self._apply_provider_filter(
+            ThreatScoreSnapshot.objects.filter(tenant_id=tenant_id)
+        )

        # Case 1: Specific snapshot requested
        if snapshot_id:
@@ -4405,17 +4627,9 @@ class OverviewViewSet(BaseRLSViewSet):
        # Case 2: Latest snapshot per provider (default)
        # Apply filters manually: this @action is outside the standard list endpoint flow,
        # so DRF's filter backends don't execute and we must flatten JSON:API params ourselves.
-        normalized_params = QueryDict(mutable=True)
-        for param_key, values in request.query_params.lists():
-            normalized_key = param_key
-            if param_key.startswith("filter[") and param_key.endswith("]"):
-                normalized_key = param_key[7:-1]
-            if normalized_key == "snapshot_id":
-                continue
-            normalized_params.setlist(normalized_key, values)
-
-        filterset = ThreatScoreSnapshotFilter(normalized_params, queryset=base_queryset)
-        filtered_queryset = filterset.qs
+        filtered_queryset = self._apply_filterset(
+            base_queryset, ThreatScoreSnapshotFilter, exclude_keys={"snapshot_id"}
+        )

        # Get distinct provider IDs from filtered queryset
        # Pick the latest snapshot per provider using Postgres DISTINCT ON pattern.
@@ -4659,6 +4873,117 @@ class OverviewViewSet(BaseRLSViewSet):

        return aggregated_snapshot

+    @action(
+        detail=False,
+        methods=["get"],
+        url_name="attack-surface",
+        url_path="attack-surfaces",
+    )
+    def attack_surface(self, request):
+        tenant_id = request.tenant_id
+        latest_scan_ids = self._latest_scan_ids_for_allowed_providers(tenant_id)
+
+        base_queryset = AttackSurfaceOverview.objects.filter(
+            tenant_id=tenant_id, scan_id__in=latest_scan_ids
+        )
+        filtered_queryset = self._apply_filterset(
+            base_queryset, AttackSurfaceOverviewFilter
+        )
+
+        aggregation = filtered_queryset.values("attack_surface_type").annotate(
+            total_findings=Coalesce(Sum("total_findings"), 0),
+            failed_findings=Coalesce(Sum("failed_findings"), 0),
+            muted_failed_findings=Coalesce(Sum("muted_failed_findings"), 0),
+        )
+
+        results = {
+            attack_surface_type: {
+                "total_findings": 0,
+                "failed_findings": 0,
+                "muted_failed_findings": 0,
+            }
+            for attack_surface_type in AttackSurfaceOverview.AttackSurfaceTypeChoices.values
+        }
+        for item in aggregation:
+            results[item["attack_surface_type"]] = {
+                "total_findings": item["total_findings"],
+                "failed_findings": item["failed_findings"],
+                "muted_failed_findings": item["muted_failed_findings"],
+            }
+
+        response_data = [
+            {"attack_surface_type": key, **value} for key, value in results.items()
+        ]
+
+        return Response(
+            self.get_serializer(response_data, many=True).data,
+            status=status.HTTP_200_OK,
+        )
+
+    @action(detail=False, methods=["get"], url_name="categories")
+    def categories(self, request):
+        tenant_id = request.tenant_id
+        provider_filters = self._extract_provider_filters_from_params()
+        latest_scan_ids = self._latest_scan_ids_for_allowed_providers(
+            tenant_id, provider_filters
+        )
+
+        base_queryset = ScanCategorySummary.objects.filter(
+            tenant_id=tenant_id, scan_id__in=latest_scan_ids
+        )
+        provider_filter_keys = {
+            "provider_id",
+            "provider_id__in",
+            "provider_type",
+            "provider_type__in",
+        }
+        filtered_queryset = self._apply_filterset(
+            base_queryset, CategoryOverviewFilter, exclude_keys=provider_filter_keys
+        )
+
+        aggregation = (
+            filtered_queryset.values("category", "severity")
+            .annotate(
+                total=Coalesce(Sum("total_findings"), 0),
+                failed=Coalesce(Sum("failed_findings"), 0),
+                new_failed=Coalesce(Sum("new_failed_findings"), 0),
+            )
+            .order_by("category", "severity")
+        )
+
+        category_data = defaultdict(
+            lambda: {
+                "total_findings": 0,
+                "failed_findings": 0,
+                "new_failed_findings": 0,
+                "severity": {
+                    "informational": 0,
+                    "low": 0,
+                    "medium": 0,
+                    "high": 0,
+                    "critical": 0,
+                },
+            }
+        )
+
+        for row in aggregation:
+            cat = row["category"]
+            sev = row["severity"]
+            category_data[cat]["total_findings"] += row["total"]
+            category_data[cat]["failed_findings"] += row["failed"]
+            category_data[cat]["new_failed_findings"] += row["new_failed"]
+            if sev in category_data[cat]["severity"]:
+                category_data[cat]["severity"][sev] = row["failed"]
+
+        response_data = [
+            {"category": cat, **data} for cat, data in sorted(category_data.items())
+        ]
+
+        return Response(
+            self.get_serializer(response_data, many=True).data,
+            status=status.HTTP_200_OK,
+        )
+

@extend_schema(tags=["Schedule"])
@extend_schema_view(
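Note: each entry handed to `CategoryOverviewSerializer` by the `categories` action above has the following shape (values invented for illustration):

```python
# Illustrative item from response_data before serialization
{
    "category": "gen-ai",
    "total_findings": 42,
    "failed_findings": 17,
    "new_failed_findings": 3,
    "severity": {"informational": 0, "low": 4, "medium": 6, "high": 5, "critical": 2},
}
```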
@@ -36,6 +36,14 @@ DATABASES = {
        "HOST": env("POSTGRES_REPLICA_HOST", default=default_db_host),
        "PORT": env("POSTGRES_REPLICA_PORT", default=default_db_port),
    },
+    "admin_replica": {
+        "ENGINE": "psqlextra.backend",
+        "NAME": env("POSTGRES_REPLICA_DB", default=default_db_name),
+        "USER": env("POSTGRES_ADMIN_USER", default="prowler"),
+        "PASSWORD": env("POSTGRES_ADMIN_PASSWORD", default="S3cret"),
+        "HOST": env("POSTGRES_REPLICA_HOST", default=default_db_host),
+        "PORT": env("POSTGRES_REPLICA_PORT", default=default_db_port),
+    },
}

DATABASES["default"] = DATABASES["prowler_user"]

@@ -37,6 +37,14 @@ DATABASES = {
        "HOST": env("POSTGRES_REPLICA_HOST", default=default_db_host),
        "PORT": env("POSTGRES_REPLICA_PORT", default=default_db_port),
    },
+    "admin_replica": {
+        "ENGINE": "psqlextra.backend",
+        "NAME": env("POSTGRES_REPLICA_DB", default=default_db_name),
+        "USER": env("POSTGRES_ADMIN_USER"),
+        "PASSWORD": env("POSTGRES_ADMIN_PASSWORD"),
+        "HOST": env("POSTGRES_REPLICA_HOST", default=default_db_host),
+        "PORT": env("POSTGRES_REPLICA_PORT", default=default_db_port),
+    },
}

DATABASES["default"] = DATABASES["prowler_user"]

@@ -5,6 +5,9 @@ IGNORED_EXCEPTIONS = [
    # Provider is not connected due to credentials errors
    "is not connected",
    "ProviderConnectionError",
+    # Provider was deleted during a scan
+    "ProviderDeletedException",
+    "violates foreign key constraint",
    # Authentication Errors from AWS
    "InvalidToken",
    "AccessDeniedException",

@@ -11,10 +11,14 @@ from django.urls import reverse
from django_celery_results.models import TaskResult
from rest_framework import status
from rest_framework.test import APIClient
-from tasks.jobs.backfill import backfill_resource_scan_summaries
+from tasks.jobs.backfill import (
+    backfill_resource_scan_summaries,
+    backfill_scan_category_summaries,
+)

from api.db_utils import rls_transaction
from api.models import (
+    AttackSurfaceOverview,
    ComplianceOverview,
    ComplianceRequirementOverview,
    Finding,
@@ -35,6 +39,7 @@ from api.models import (
    SAMLConfiguration,
    SAMLDomainIndex,
    Scan,
+    ScanCategorySummary,
    ScanSummary,
    StateChoices,
    StatusChoices,
@@ -1270,6 +1275,113 @@ def latest_scan_finding(authenticated_client, providers_fixture, resources_fixtu
    return finding


+@pytest.fixture(scope="function")
+def findings_with_categories(scans_fixture, resources_fixture):
+    scan = scans_fixture[0]
+    resource = resources_fixture[0]
+
+    finding = Finding.objects.create(
+        tenant_id=scan.tenant_id,
+        uid="finding_with_categories_1",
+        scan=scan,
+        delta=None,
+        status=Status.FAIL,
+        status_extended="test status",
+        impact=Severity.critical,
+        impact_extended="test impact",
+        severity=Severity.critical,
+        raw_result={"status": Status.FAIL},
+        check_id="genai_check",
+        check_metadata={"CheckId": "genai_check"},
+        categories=["gen-ai", "security"],
+        first_seen_at="2024-01-02T00:00:00Z",
+    )
+    finding.add_resources([resource])
+    backfill_resource_scan_summaries(str(scan.tenant_id), str(scan.id))
+    return finding
+
+
+@pytest.fixture(scope="function")
+def findings_with_multiple_categories(scans_fixture, resources_fixture):
+    scan = scans_fixture[0]
+    resource1, resource2 = resources_fixture[:2]
+
+    finding1 = Finding.objects.create(
+        tenant_id=scan.tenant_id,
+        uid="finding_multi_cat_1",
+        scan=scan,
+        delta=None,
+        status=Status.FAIL,
+        status_extended="test status",
+        impact=Severity.critical,
+        impact_extended="test impact",
+        severity=Severity.critical,
+        raw_result={"status": Status.FAIL},
+        check_id="genai_check",
+        check_metadata={"CheckId": "genai_check"},
+        categories=["gen-ai", "security"],
+        first_seen_at="2024-01-02T00:00:00Z",
+    )
+    finding1.add_resources([resource1])
+
+    finding2 = Finding.objects.create(
+        tenant_id=scan.tenant_id,
+        uid="finding_multi_cat_2",
+        scan=scan,
+        delta=None,
+        status=Status.FAIL,
+        status_extended="test status 2",
+        impact=Severity.high,
+        impact_extended="test impact 2",
+        severity=Severity.high,
+        raw_result={"status": Status.FAIL},
+        check_id="iam_check",
+        check_metadata={"CheckId": "iam_check"},
+        categories=["iam", "security"],
+        first_seen_at="2024-01-02T00:00:00Z",
+    )
+    finding2.add_resources([resource2])
+
+    backfill_resource_scan_summaries(str(scan.tenant_id), str(scan.id))
+    return finding1, finding2
+
+
+@pytest.fixture(scope="function")
+def latest_scan_finding_with_categories(
+    authenticated_client, providers_fixture, resources_fixture
+):
+    provider = providers_fixture[0]
+    tenant_id = str(providers_fixture[0].tenant_id)
+    resource = resources_fixture[0]
+    scan = Scan.objects.create(
+        name="latest completed scan with categories",
+        provider=provider,
+        trigger=Scan.TriggerChoices.MANUAL,
+        state=StateChoices.COMPLETED,
+        tenant_id=tenant_id,
+    )
+    finding = Finding.objects.create(
+        tenant_id=tenant_id,
+        uid="latest_finding_with_categories",
+        scan=scan,
+        delta="new",
+        status=Status.FAIL,
+        status_extended="test status",
+        impact=Severity.critical,
+        impact_extended="test impact",
+        severity=Severity.critical,
+        raw_result={"status": Status.FAIL},
+        check_id="genai_iam_check",
+        check_metadata={"CheckId": "genai_iam_check"},
+        categories=["gen-ai", "iam"],
+        first_seen_at="2024-01-02T00:00:00Z",
+    )
+    finding.add_resources([resource])
+    backfill_resource_scan_summaries(tenant_id, str(scan.id))
+    backfill_scan_category_summaries(tenant_id, str(scan.id))
+    return finding
+
+
@pytest.fixture(scope="function")
def latest_scan_resource(authenticated_client, providers_fixture):
    provider = providers_fixture[0]
@@ -1469,6 +1581,45 @@ def mute_rules_fixture(tenants_fixture, create_test_user, findings_fixture):
    return mute_rule1, mute_rule2


+@pytest.fixture
+def create_attack_surface_overview():
+    def _create(tenant, scan, attack_surface_type, total=10, failed=5, muted_failed=2):
+        return AttackSurfaceOverview.objects.create(
+            tenant=tenant,
+            scan=scan,
+            attack_surface_type=attack_surface_type,
+            total_findings=total,
+            failed_findings=failed,
+            muted_failed_findings=muted_failed,
+        )
+
+    return _create
+
+
+@pytest.fixture
+def create_scan_category_summary():
+    def _create(
+        tenant,
+        scan,
+        category,
+        severity,
+        total_findings=10,
+        failed_findings=5,
+        new_failed_findings=2,
+    ):
+        return ScanCategorySummary.objects.create(
+            tenant=tenant,
+            scan=scan,
+            category=category,
+            severity=severity,
+            total_findings=total_findings,
+            failed_findings=failed_findings,
+            new_failed_findings=new_failed_findings,
+        )
+
+    return _create
+
+
def get_authorization_header(access_token: str) -> dict:
    return {"Authorization": f"Bearer {access_token}"}

@@ -61,4 +61,5 @@ def schedule_provider_scan(provider_instance: Provider):
            "tenant_id": str(provider_instance.tenant_id),
            "provider_id": provider_id,
        },
+        countdown=5,  # Avoid race conditions between the worker and the database
    )

@@ -1,14 +1,23 @@
+from collections import defaultdict
+from datetime import timedelta
+
from django.db.models import Sum
+from django.utils import timezone
+from tasks.jobs.scan import aggregate_category_counts

from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import (
    ComplianceOverviewSummary,
    ComplianceRequirementOverview,
+    DailySeveritySummary,
    Finding,
    Resource,
    ResourceFindingMapping,
    ResourceScanSummary,
    Scan,
+    ScanCategorySummary,
    ScanSummary,
    StateChoices,
)

@@ -175,3 +184,160 @@ def backfill_compliance_summaries(tenant_id: str, scan_id: str):
        )

    return {"status": "backfilled", "inserted": len(summary_objects)}
+
+
+def backfill_daily_severity_summaries(tenant_id: str, days: int = None):
+    """
+    Backfill DailySeveritySummary from completed scans.
+    Groups by provider+date, keeps latest scan per day.
+    """
+    created_count = 0
+    updated_count = 0
+
+    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
+        scan_filter = {
+            "tenant_id": tenant_id,
+            "state": StateChoices.COMPLETED,
+            "completed_at__isnull": False,
+        }
+
+        if days is not None:
+            cutoff_date = timezone.now() - timedelta(days=days)
+            scan_filter["completed_at__gte"] = cutoff_date
+
+        completed_scans = (
+            Scan.objects.filter(**scan_filter)
+            .order_by("provider_id", "-completed_at")
+            .values("id", "provider_id", "completed_at")
+        )
+
+        if not completed_scans:
+            return {"status": "no scans to backfill"}
+
+        # Keep only latest scan per provider/day
+        latest_scans_by_day = {}
+        for scan in completed_scans:
+            key = (scan["provider_id"], scan["completed_at"].date())
+            if key not in latest_scans_by_day:
+                latest_scans_by_day[key] = scan
+
+    # Process each provider/day
+    for (provider_id, scan_date), scan in latest_scans_by_day.items():
+        scan_id = scan["id"]
+
+        with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
+            severity_totals = (
+                ScanSummary.objects.filter(
+                    tenant_id=tenant_id,
+                    scan_id=scan_id,
+                )
+                .values("severity")
+                .annotate(total_fail=Sum("fail"), total_muted=Sum("muted"))
+            )
+
+            severity_data = {
+                "critical": 0,
+                "high": 0,
+                "medium": 0,
+                "low": 0,
+                "informational": 0,
+                "muted": 0,
+            }
+
+            for row in severity_totals:
+                severity = row["severity"]
+                if severity in severity_data:
+                    severity_data[severity] = row["total_fail"] or 0
+                severity_data["muted"] += row["total_muted"] or 0
+
+        with rls_transaction(tenant_id):
+            _, created = DailySeveritySummary.objects.update_or_create(
+                tenant_id=tenant_id,
+                provider_id=provider_id,
+                date=scan_date,
+                defaults={
+                    "scan_id": scan_id,
+                    "critical": severity_data["critical"],
+                    "high": severity_data["high"],
+                    "medium": severity_data["medium"],
+                    "low": severity_data["low"],
+                    "informational": severity_data["informational"],
+                    "muted": severity_data["muted"],
+                },
+            )
+
+            if created:
+                created_count += 1
+            else:
+                updated_count += 1
+
+    return {
+        "status": "backfilled",
+        "created": created_count,
+        "updated": updated_count,
+        "total_days": len(latest_scans_by_day),
+    }
+
+
+def backfill_scan_category_summaries(tenant_id: str, scan_id: str):
+    """
+    Backfill ScanCategorySummary for a completed scan.
+
+    Aggregates category counts from all findings in the scan and creates
+    one ScanCategorySummary row per (category, severity) combination.
+
+    Args:
+        tenant_id: Target tenant UUID
+        scan_id: Scan UUID to backfill
+
+    Returns:
+        dict: Status indicating whether backfill was performed
+    """
+    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
+        if ScanCategorySummary.objects.filter(
+            tenant_id=tenant_id, scan_id=scan_id
+        ).exists():
+            return {"status": "already backfilled"}
+
+        if not Scan.objects.filter(
+            tenant_id=tenant_id,
+            id=scan_id,
+            state__in=(StateChoices.COMPLETED, StateChoices.FAILED),
+        ).exists():
+            return {"status": "scan is not completed"}
+
+        category_counts: dict[tuple[str, str], dict[str, int]] = {}
+        for finding in Finding.all_objects.filter(
+            tenant_id=tenant_id, scan_id=scan_id
+        ).values("categories", "severity", "status", "delta", "muted"):
+            aggregate_category_counts(
+                categories=finding.get("categories") or [],
+                severity=finding.get("severity"),
+                status=finding.get("status"),
+                delta=finding.get("delta"),
+                muted=finding.get("muted", False),
+                cache=category_counts,
+            )
+
+        if not category_counts:
+            return {"status": "no categories to backfill"}
+
+        category_summaries = [
+            ScanCategorySummary(
+                tenant_id=tenant_id,
+                scan_id=scan_id,
+                category=category,
+                severity=severity,
+                total_findings=counts["total"],
+                failed_findings=counts["failed"],
+                new_failed_findings=counts["new_failed"],
+            )
+            for (category, severity), counts in category_counts.items()
+        ]
+
+    with rls_transaction(tenant_id):
+        ScanCategorySummary.objects.bulk_create(
+            category_summaries, batch_size=500, ignore_conflicts=True
+        )
+
+    return {"status": "backfilled", "categories_count": len(category_counts)}

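Note: a sketch of invoking the new backfills from a shell or management task (tenant and scan IDs invented):

```python
from tasks.jobs.backfill import (
    backfill_daily_severity_summaries,
    backfill_scan_category_summaries,
)

# Recompute daily severity rows for the last 30 days of completed scans
print(backfill_daily_severity_summaries("7c9d...-tenant-uuid", days=30))
# e.g. {"status": "backfilled", "created": 12, "updated": 3, "total_days": 15}

# Backfill category summaries for one finished scan (idempotent: re-running
# returns {"status": "already backfilled"})
print(backfill_scan_category_summaries("7c9d...-tenant-uuid", "0190...-scan-uuid"))
# e.g. {"status": "backfilled", "categories_count": 8}
```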
@@ -2,6 +2,8 @@ from typing import Dict

import boto3
import openai
+from botocore import UNSIGNED
+from botocore.config import Config
from botocore.exceptions import BotoCoreError, ClientError
from celery.utils.log import get_task_logger

@@ -9,6 +11,74 @@ from api.models import LighthouseProviderConfiguration, LighthouseProviderModels

logger = get_task_logger(__name__)

+# OpenAI model prefixes to exclude from Lighthouse model selection.
+# These models don't support text chat completions and tool calling.
+EXCLUDED_OPENAI_MODEL_PREFIXES = (
+    "dall-e",  # Image generation
+    "whisper",  # Audio transcription
+    "tts-",  # Text-to-speech (tts-1, tts-1-hd, etc.)
+    "sora",  # Text-to-video (sora-2, sora-2-pro, etc.)
+    "text-embedding",  # Embeddings
+    "embedding",  # Embeddings (alternative naming)
+    "text-moderation",  # Content moderation
+    "omni-moderation",  # Content moderation
+    "text-davinci",  # Legacy completion models
+    "text-curie",  # Legacy completion models
+    "text-babbage",  # Legacy completion models
+    "text-ada",  # Legacy completion models
+    "davinci",  # Legacy completion models
+    "curie",  # Legacy completion models
+    "babbage",  # Legacy completion models
+    "ada",  # Legacy completion models
+    "computer-use",  # Computer control agent
+    "gpt-image",  # Image generation
+    "gpt-audio",  # Audio models
+    "gpt-realtime",  # Realtime voice API
+)
+
+# OpenAI model substrings to exclude (patterns that can appear anywhere in model ID).
+# These patterns identify non-chat model variants.
+EXCLUDED_OPENAI_MODEL_SUBSTRINGS = (
+    "-audio-",  # Audio preview models (gpt-4o-audio-preview, etc.)
+    "-realtime-",  # Realtime preview models (gpt-4o-realtime-preview, etc.)
+    "-transcribe",  # Transcription models (gpt-4o-transcribe, etc.)
+    "-tts",  # TTS models (gpt-4o-mini-tts)
+    "-instruct",  # Legacy instruct models (gpt-3.5-turbo-instruct, etc.)
+)
+
+
+def _extract_error_message(e: Exception) -> str:
+    """
+    Extract a user-friendly error message from various exception types.
+
+    This function handles exceptions from different providers (OpenAI, AWS Bedrock)
+    and extracts the most relevant error message for display to users.
+
+    Args:
+        e: The exception to extract a message from.
+
+    Returns:
+        str: A user-friendly error message.
+    """
+    # For OpenAI SDK errors (>= v1.0)
+    # OpenAI exceptions have a 'body' attribute with error details
+    if hasattr(e, "body") and isinstance(e.body, dict):
+        if "message" in e.body:
+            return e.body["message"]
+        # Sometimes nested under 'error' key
+        if "error" in e.body and isinstance(e.body["error"], dict):
+            return e.body["error"].get("message", str(e))
+
+    # For boto3 ClientError
+    # Boto3 exceptions have a 'response' attribute with error details
+    if hasattr(e, "response") and isinstance(e.response, dict):
+        error_info = e.response.get("Error", {})
+        if error_info.get("Message"):
+            return error_info["Message"]
+
+    # Fallback to string representation for unknown error types
+    return str(e)
+
+
def _extract_openai_api_key(
    provider_cfg: LighthouseProviderConfiguration,
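Note: a standalone sketch of how these exclusion tuples are typically applied (the real filtering loop begins at the truncated end of this diff; the helper name here is illustrative):

```python
def is_chat_capable(model_id: str) -> bool:  # illustrative helper, not from the diff
    # str.startswith accepts a tuple, so one call covers every excluded prefix
    if model_id.startswith(EXCLUDED_OPENAI_MODEL_PREFIXES):
        return False
    return not any(s in model_id for s in EXCLUDED_OPENAI_MODEL_SUBSTRINGS)


assert is_chat_capable("gpt-4o")
assert not is_chat_capable("whisper-1")  # excluded prefix
assert not is_chat_capable("gpt-4o-audio-preview")  # excluded substring
```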
@@ -56,21 +126,39 @@ def _extract_bedrock_credentials(
    """
    Safely extract AWS Bedrock credentials from a provider configuration.

+    Supports two authentication methods:
+    1. AWS access key + secret key + region
+    2. Bedrock API key (bearer token) + region
+
    Args:
        provider_cfg (LighthouseProviderConfiguration): The provider configuration instance
            containing the credentials.

    Returns:
-        Dict[str, str] | None: Dictionary with 'access_key_id', 'secret_access_key', and
-            'region' if present and valid, otherwise None.
+        Dict[str, str] | None: Dictionary with either:
+            - 'access_key_id', 'secret_access_key', and 'region' for access key auth
+            - 'api_key' and 'region' for API key (bearer token) auth
+            Returns None if credentials are invalid or missing.
    """
    creds = provider_cfg.credentials_decoded
    if not isinstance(creds, dict):
        return None

+    region = creds.get("region")
+    if not isinstance(region, str) or not region:
+        return None
+
+    # Check for API key authentication first
+    api_key = creds.get("api_key")
+    if isinstance(api_key, str) and api_key:
+        return {
+            "api_key": api_key,
+            "region": region,
+        }
+
+    # Fall back to access key authentication
    access_key_id = creds.get("access_key_id")
    secret_access_key = creds.get("secret_access_key")
-    region = creds.get("region")

    # Validate all required fields are present and are strings
    if (
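Note: given the logic above, `_extract_bedrock_credentials` resolves to one of two dict shapes (illustrative values):

```python
# API key present -> bearer-token shape wins; any access keys are ignored
# {"api_key": "bedrock-api-key...", "region": "eu-west-1"}

# Otherwise it falls back to access keys (validated just below)
# {"access_key_id": "AKIA...", "secret_access_key": "...", "region": "eu-west-1"}
```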
@@ -78,8 +166,6 @@ def _extract_bedrock_credentials(
        or not access_key_id
        or not isinstance(secret_access_key, str)
        or not secret_access_key
-        or not isinstance(region, str)
-        or not region
    ):
        return None

@@ -90,6 +176,51 @@ def _extract_bedrock_credentials(
|
||||
}
|
||||
|
||||
|
||||
def _create_bedrock_client(
|
||||
bedrock_creds: Dict[str, str], service_name: str = "bedrock"
|
||||
):
|
||||
"""
|
||||
Create a boto3 Bedrock client with the appropriate authentication method.
|
||||
|
||||
Supports two authentication methods:
|
||||
1. API key (bearer token) - uses unsigned requests with Authorization header
|
||||
2. AWS access key + secret key - uses standard SigV4 signing
|
||||
|
||||
Args:
|
||||
bedrock_creds: Dictionary with either:
|
||||
- 'api_key' and 'region' for API key (bearer token) auth
|
||||
- 'access_key_id', 'secret_access_key', and 'region' for access key auth
|
||||
service_name: The Bedrock service name. Use 'bedrock' for control plane
|
||||
operations (list_foundation_models, etc.) or 'bedrock-runtime' for
|
||||
inference operations.
|
||||
|
||||
Returns:
|
||||
boto3 client configured for the specified Bedrock service.
|
||||
"""
|
||||
region = bedrock_creds["region"]
|
||||
|
||||
if "api_key" in bedrock_creds:
|
||||
bearer_token = bedrock_creds["api_key"]
|
||||
client = boto3.client(
|
||||
service_name=service_name,
|
||||
region_name=region,
|
||||
config=Config(signature_version=UNSIGNED),
|
||||
)
|
||||
|
||||
def inject_bearer_token(request, **kwargs):
|
||||
request.headers["Authorization"] = f"Bearer {bearer_token}"
|
||||
|
||||
client.meta.events.register("before-send.*.*", inject_bearer_token)
|
||||
return client
|
||||
|
||||
return boto3.client(
|
||||
service_name=service_name,
|
||||
region_name=region,
|
||||
aws_access_key_id=bedrock_creds["access_key_id"],
|
||||
aws_secret_access_key=bedrock_creds["secret_access_key"],
|
||||
)
|
||||
|
||||
|
||||
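A minimal sketch of the two credential shapes the helper accepts (key values illustrative):

# Bearer-token auth: requests are left unsigned and the event hook adds the header
client = _create_bedrock_client(
    {"api_key": "bedrock-api-key-...", "region": "us-east-1"}
)

# SigV4 auth: standard access key / secret key signing
runtime = _create_bedrock_client(
    {
        "access_key_id": "AKIA...",
        "secret_access_key": "...",
        "region": "us-east-1",
    },
    service_name="bedrock-runtime",  # inference plane instead of control plane
)
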
def check_lighthouse_provider_connection(provider_config_id: str) -> Dict:
    """
    Validate a Lighthouse provider configuration by calling the provider API and
@@ -141,12 +272,7 @@ def check_lighthouse_provider_connection(provider_config_id: str) -> Dict:
            }

            # Test connection by listing foundation models
            bedrock_client = boto3.client(
                "bedrock",
                aws_access_key_id=bedrock_creds["access_key_id"],
                aws_secret_access_key=bedrock_creds["secret_access_key"],
                region_name=bedrock_creds["region"],
            )
            bedrock_client = _create_bedrock_client(bedrock_creds)
            _ = bedrock_client.list_foundation_models()

        elif (
@@ -179,32 +305,54 @@ def check_lighthouse_provider_connection(provider_config_id: str) -> Dict:
        return {"connected": True, "error": None}

    except Exception as e:
        error_message = _extract_error_message(e)
        logger.warning(
            "%s connection check failed: %s", provider_cfg.provider_type, str(e)
            "%s connection check failed: %s", provider_cfg.provider_type, error_message
        )
        provider_cfg.is_active = False
        provider_cfg.save()
        return {"connected": False, "error": str(e)}
        return {"connected": False, "error": error_message}


def _fetch_openai_models(api_key: str) -> Dict[str, str]:
    """
    Fetch available models from OpenAI API.

    Filters out models that don't support text input/output and tool calling,
    such as image generation (DALL-E), audio transcription (Whisper),
    text-to-speech (TTS), embeddings, and moderation models.

    Args:
        api_key: OpenAI API key for authentication.

    Returns:
        Dict mapping model_id to model_name. For OpenAI, both are the same
        as the API doesn't provide separate display names.
        as the API doesn't provide separate display names. Only includes
        models that support text input, text output, and tool calling.

    Raises:
        Exception: If the API call fails.
    """
    client = openai.OpenAI(api_key=api_key)
    models = client.models.list()
    # OpenAI uses model.id for both ID and display name
    return {m.id: m.id for m in getattr(models, "data", [])}

    # Filter models to only include those supporting chat completions + tool calling
    filtered_models = {}
    for model in getattr(models, "data", []):
        model_id = model.id

        # Skip if model ID starts with excluded prefixes
        if model_id.startswith(EXCLUDED_OPENAI_MODEL_PREFIXES):
            continue

        # Skip if model ID contains excluded substrings
        if any(substring in model_id for substring in EXCLUDED_OPENAI_MODEL_SUBSTRINGS):
            continue

        # Include model (supports chat completions + tool calling)
        filtered_models[model_id] = model_id

    return filtered_models


def _fetch_openai_compatible_models(base_url: str, api_key: str) -> Dict[str, str]:
@@ -232,105 +380,219 @@ def _fetch_openai_compatible_models(base_url: str, api_key: str) -> Dict[str, st
    return available_models


def _fetch_bedrock_models(bedrock_creds: Dict[str, str]) -> Dict[str, str]:
def _get_region_prefix(region: str) -> str:
    """
    Fetch available models from AWS Bedrock with entitlement verification.
    Determine the geographic prefix for an AWS region.

    This function:
    1. Lists foundation models with TEXT modality support
    2. Lists inference profiles with TEXT modality support
    3. Verifies user has entitlement access to each model
    Examples: ap-south-1 -> apac, us-east-1 -> us, eu-west-1 -> eu
    """
    if region.startswith(("us-", "ca-", "sa-")):
        return "us"
    elif region.startswith("eu-"):
        return "eu"
    elif region.startswith("ap-"):
        return "apac"
    return "global"

    Args:
        bedrock_creds: Dictionary with 'access_key_id', 'secret_access_key', and 'region'.

def _clean_inference_profile_name(profile_name: str) -> str:
    """
    Remove the geographic prefix from an inference profile name.

    AWS includes geographic prefixes in profile names which are redundant
    since the profile ID already contains this information.

    Examples:
        "APAC Anthropic Claude 3.5 Sonnet" -> "Anthropic Claude 3.5 Sonnet"
        "GLOBAL Claude Sonnet 4.5" -> "Claude Sonnet 4.5"
        "US Anthropic Claude 3 Haiku" -> "Anthropic Claude 3 Haiku"
    """
    prefixes = ["APAC ", "GLOBAL ", "US ", "EU ", "APAC-", "GLOBAL-", "US-", "EU-"]

    for prefix in prefixes:
        if profile_name.upper().startswith(prefix.upper()):
            return profile_name[len(prefix) :].strip()

    return profile_name

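A short behavioral sketch of the two helpers above (inputs illustrative):

assert _get_region_prefix("ap-south-1") == "apac"
assert _get_region_prefix("me-central-1") == "global"  # unmatched prefixes fall through

assert (
    _clean_inference_profile_name("APAC Anthropic Claude 3.5 Sonnet")
    == "Anthropic Claude 3.5 Sonnet"
)
# Names without a recognized geographic prefix are returned unchanged
assert _clean_inference_profile_name("Claude 3 Haiku") == "Claude 3 Haiku"
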
def _supports_text_modality(input_modalities: list, output_modalities: list) -> bool:
    """Check if model supports TEXT for both input and output."""
    return "TEXT" in input_modalities and "TEXT" in output_modalities


def _get_foundation_model_modalities(
    bedrock_client, model_id: str
) -> tuple[list, list] | None:
    """
    Fetch input and output modalities for a foundation model.

    Returns:
        Dict mapping model_id to model_name for all accessible models.

    Raises:
        BotoCoreError, ClientError: If AWS API calls fail.
        (input_modalities, output_modalities) or None if fetch fails
    """
    bedrock_client = boto3.client(
        "bedrock",
        aws_access_key_id=bedrock_creds["access_key_id"],
        aws_secret_access_key=bedrock_creds["secret_access_key"],
        region_name=bedrock_creds["region"],
    )
    try:
        model_info = bedrock_client.get_foundation_model(modelIdentifier=model_id)
        model_details = model_info.get("modelDetails", {})
        input_mods = model_details.get("inputModalities", [])
        output_mods = model_details.get("outputModalities", [])
        return (input_mods, output_mods)
    except (BotoCoreError, ClientError) as e:
        logger.debug("Could not fetch model details for %s: %s", model_id, str(e))
        return None

    models_to_check: Dict[str, str] = {}

    # Step 1: Get foundation models with TEXT modality
def _extract_foundation_model_ids(profile_models: list) -> list[str]:
    """
    Extract foundation model IDs from inference profile model ARNs.

    Args:
        profile_models: List of model references from inference profile

    Returns:
        List of foundation model IDs extracted from ARNs
    """
    model_ids = []
    for model_ref in profile_models:
        model_arn = model_ref.get("modelArn", "")
        if "foundation-model/" in model_arn:
            model_id = model_arn.split("foundation-model/")[1]
            model_ids.append(model_id)
    return model_ids


def _build_inference_profile_map(
    bedrock_client, region: str
) -> Dict[str, tuple[str, str]]:
    """
    Build map of foundation_model_id -> best inference profile.

    Returns:
        Dict mapping foundation_model_id to (profile_id, profile_name).
        Only includes profiles with TEXT modality support.
        Prefers region-matched profiles over others.
    """
    region_prefix = _get_region_prefix(region)
    model_to_profile: Dict[str, tuple[str, str]] = {}

    try:
        response = bedrock_client.list_inference_profiles()
        profiles = response.get("inferenceProfileSummaries", [])

        for profile in profiles:
            profile_id = profile.get("inferenceProfileId")
            profile_name = profile.get("inferenceProfileName")

            if not profile_id or not profile_name:
                continue

            profile_models = profile.get("models", [])
            if not profile_models:
                continue

            foundation_model_ids = _extract_foundation_model_ids(profile_models)
            if not foundation_model_ids:
                continue

            modalities = _get_foundation_model_modalities(
                bedrock_client, foundation_model_ids[0]
            )
            if not modalities:
                continue

            input_mods, output_mods = modalities
            if not _supports_text_modality(input_mods, output_mods):
                continue

            is_preferred = profile_id.startswith(f"{region_prefix}.")
            clean_name = _clean_inference_profile_name(profile_name)

            for foundation_model_id in foundation_model_ids:
                if foundation_model_id not in model_to_profile:
                    model_to_profile[foundation_model_id] = (profile_id, clean_name)
                elif is_preferred and not model_to_profile[foundation_model_id][
                    0
                ].startswith(f"{region_prefix}."):
                    model_to_profile[foundation_model_id] = (profile_id, clean_name)

    except (BotoCoreError, ClientError) as e:
        logger.info("Could not fetch inference profiles in %s: %s", region, str(e))

    return model_to_profile

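A small sketch of the preference rule above: a region-matched profile replaces a previously seen cross-region one, but never the other way around (IDs illustrative):

model_to_profile = {}
region_prefix = "eu"

# First profile seen for a model wins by default
model_to_profile["anthropic.claude-3-haiku"] = (
    "us.anthropic.claude-3-haiku", "Claude 3 Haiku"
)

# A region-matched candidate ("eu." prefix) then takes precedence
candidate = ("eu.anthropic.claude-3-haiku", "Claude 3 Haiku")
current = model_to_profile["anthropic.claude-3-haiku"]
if candidate[0].startswith(f"{region_prefix}.") and not current[0].startswith(
    f"{region_prefix}."
):
    model_to_profile["anthropic.claude-3-haiku"] = candidate
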
def _check_on_demand_availability(bedrock_client, model_id: str) -> bool:
    """Check if an ON_DEMAND foundation model is entitled and available."""
    try:
        availability = bedrock_client.get_foundation_model_availability(
            modelId=model_id
        )
        entitlement = availability.get("entitlementAvailability")
        return entitlement == "AVAILABLE"
    except (BotoCoreError, ClientError) as e:
        logger.debug("Could not check availability for %s: %s", model_id, str(e))
        return False

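For reference, the entitlement probe reduces to a single control-plane call keyed off one response field; a hedged sketch (model ID illustrative, response shape abridged):

availability = bedrock_client.get_foundation_model_availability(
    modelId="anthropic.claude-3-haiku-20240307-v1:0"  # illustrative model ID
)
# Anything other than "AVAILABLE" means the model is skipped;
# API errors are likewise treated as unavailable.
usable = availability.get("entitlementAvailability") == "AVAILABLE"
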
def _fetch_bedrock_models(bedrock_creds: Dict[str, str]) -> Dict[str, str]:
    """
    Fetch available models from AWS Bedrock, preferring inference profiles over ON_DEMAND.

    Strategy:
    1. Build map of foundation_model -> best_inference_profile (with TEXT validation)
    2. For each TEXT-capable foundation model:
       - Use inference profile ID if available (preferred - better throughput)
       - Fall back to foundation model ID if only ON_DEMAND is available
    3. Verify entitlement for ON_DEMAND models

    Args:
        bedrock_creds: Dict with 'region' and auth credentials

    Returns:
        Dict mapping model_id to model_name. IDs can be:
        - Inference profile IDs (e.g., "apac.anthropic.claude-3-5-sonnet-20240620-v1:0")
        - Foundation model IDs (e.g., "anthropic.claude-3-5-sonnet-20240620-v1:0")
    """
    bedrock_client = _create_bedrock_client(bedrock_creds)
    region = bedrock_creds["region"]

    model_to_profile = _build_inference_profile_map(bedrock_client, region)

    foundation_response = bedrock_client.list_foundation_models()
    model_summaries = foundation_response.get("modelSummaries", [])

    for model in model_summaries:
        # Check if model supports TEXT input and output modality
        input_modalities = model.get("inputModalities", [])
        output_modalities = model.get("outputModalities", [])
    models_to_return: Dict[str, str] = {}
    on_demand_models: set[str] = set()

        if "TEXT" not in input_modalities or "TEXT" not in output_modalities:
    for model in model_summaries:
        input_mods = model.get("inputModalities", [])
        output_mods = model.get("outputModalities", [])

        if not _supports_text_modality(input_mods, output_mods):
            continue

        model_id = model.get("modelId")
        if not model_id:
        model_name = model.get("modelName")

        if not model_id or not model_name:
            continue

        inference_types = model.get("inferenceTypesSupported", [])
        if model_id in model_to_profile:
            profile_id, profile_name = model_to_profile[model_id]
            models_to_return[profile_id] = profile_name
        else:
            inference_types = model.get("inferenceTypesSupported", [])
            if "ON_DEMAND" in inference_types:
                models_to_return[model_id] = model_name
                on_demand_models.add(model_id)

        # Only include models with ON_DEMAND inference support
        if "ON_DEMAND" in inference_types:
            models_to_check[model_id] = model["modelName"]

    # Step 2: Get inference profiles
    try:
        inference_profiles_response = bedrock_client.list_inference_profiles()
        inference_profiles = inference_profiles_response.get(
            "inferenceProfileSummaries", []
        )

        for profile in inference_profiles:
            # Check if profile supports TEXT modality
            input_modalities = profile.get("inputModalities", [])
            output_modalities = profile.get("outputModalities", [])

            if "TEXT" not in input_modalities or "TEXT" not in output_modalities:
                continue

            profile_id = profile.get("inferenceProfileId")
            if profile_id:
                models_to_check[profile_id] = profile["inferenceProfileName"]

    except (BotoCoreError, ClientError) as e:
        logger.info(
            "Could not fetch inference profiles in %s: %s",
            bedrock_creds["region"],
            str(e),
        )

    # Step 3: Verify entitlement availability for each model
    available_models: Dict[str, str] = {}

    for model_id, model_name in models_to_check.items():
        try:
            availability = bedrock_client.get_foundation_model_availability(
                modelId=model_id
            )

            entitlement = availability.get("entitlementAvailability")

            # Only include models user has access to
            if entitlement == "AVAILABLE":
    for model_id, model_name in models_to_return.items():
        if model_id in on_demand_models:
            if _check_on_demand_availability(bedrock_client, model_id):
                available_models[model_id] = model_name
            else:
                logger.debug(
                    "Skipping model %s - entitlement status: %s", model_id, entitlement
                )

        except (BotoCoreError, ClientError) as e:
            logger.debug(
                "Could not check availability for model %s: %s", model_id, str(e)
            )
            continue
        else:
            available_models[model_id] = model_name

    return available_models

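The resulting catalog maps whichever ID should be used for inference to a display name; a sketch of a plausible return value (entries illustrative, not taken from a real account):

{
    # Profile-backed model: the inference profile ID is what gets invoked
    "apac.anthropic.claude-3-5-sonnet-20240620-v1:0": "Anthropic Claude 3.5 Sonnet",
    # ON_DEMAND-only model: the bare foundation model ID is kept
    "amazon.titan-text-express-v1": "Titan Text Express",
}
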
@@ -359,7 +621,6 @@ def refresh_lighthouse_provider_models(provider_config_id: str) -> Dict:
    provider_cfg = LighthouseProviderConfiguration.objects.get(pk=provider_config_id)
    fetched_models: Dict[str, str] = {}

    # Fetch models from the appropriate provider
    try:
        if (
            provider_cfg.provider_type
@@ -414,12 +675,13 @@ def refresh_lighthouse_provider_models(provider_config_id: str) -> Dict:
        }

    except Exception as e:
        error_message = _extract_error_message(e)
        logger.warning(
            "Unexpected error refreshing %s models: %s",
            provider_cfg.provider_type,
            str(e),
            error_message,
        )
        return {"created": 0, "updated": 0, "deleted": 0, "error": str(e)}
        return {"created": 0, "updated": 0, "deleted": 0, "error": error_message}

    # Upsert models into the catalog
    created = 0

@@ -1,6 +1,7 @@
import io
import os
from collections import defaultdict
from functools import partial
from pathlib import Path
from shutil import rmtree

@@ -772,7 +773,9 @@ def _create_section_score_chart(
    return buffer


def _add_pdf_footer(canvas_obj: canvas.Canvas, doc: SimpleDocTemplate) -> None:
def _add_pdf_footer(
    canvas_obj: canvas.Canvas, doc: SimpleDocTemplate, compliance_name: str
) -> None:
    """
    Add footer with page number and branding to each page of the PDF.

@@ -782,7 +785,9 @@ def _add_pdf_footer(canvas_obj: canvas.Canvas, doc: SimpleDocTemplate) -> None:
    """
    canvas_obj.saveState()
    width, height = doc.pagesize
    page_num_text = f"Página {doc.page}"
    page_num_text = (
        f"{'Página' if 'ens' in compliance_name.lower() else 'Page'} {doc.page}"
    )
    canvas_obj.setFont("PlusJakartaSans", 9)
    canvas_obj.setFillColorRGB(0.4, 0.4, 0.4)
    canvas_obj.drawString(30, 20, page_num_text)
@@ -1595,7 +1600,11 @@ def generate_threatscore_report(
        elements.append(PageBreak())

        # Build the PDF
        doc.build(elements, onFirstPage=_add_pdf_footer, onLaterPages=_add_pdf_footer)
        doc.build(
            elements,
            onFirstPage=partial(_add_pdf_footer, compliance_name=compliance_name),
            onLaterPages=partial(_add_pdf_footer, compliance_name=compliance_name),
        )
    except Exception as e:
        tb_lineno = e.__traceback__.tb_lineno if e.__traceback__ else "unknown"
        logger.info(f"Error building the document, line {tb_lineno} -- {e}")

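ReportLab invokes the onFirstPage/onLaterPages hooks with exactly (canvas, doc), so the new compliance_name argument has to be pre-bound; a minimal sketch of the functools.partial pattern used above (footer body elided, framework name hypothetical):

from functools import partial

def footer(canvas_obj, doc, compliance_name):
    ...  # same shape as _add_pdf_footer above

bound = partial(footer, compliance_name="ens_rd2022_aws")  # hypothetical name
# bound(canvas_obj, doc) now matches the (canvas, doc) signature ReportLab expects
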
@@ -2229,12 +2238,20 @@ def generate_ens_report(
        [
            "CUMPLE",
            str(passed_requirements),
            f"{(passed_requirements / total_requirements * 100):.1f}%",
            (
                f"{(passed_requirements / total_requirements * 100):.1f}%"
                if total_requirements > 0
                else "0.0%"
            ),
        ],
        [
            "NO CUMPLE",
            str(failed_requirements),
            f"{(failed_requirements / total_requirements * 100):.1f}%",
            (
                f"{(failed_requirements / total_requirements * 100):.1f}%"
                if total_requirements > 0
                else "0.0%"
            ),
        ],
        ["TOTAL", str(total_requirements), "100%"],
    ]
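The conditional expressions above guard the percentage cells against a ZeroDivisionError when a framework has no requirements; the same guard as a minimal standalone sketch:

def pct(part: int, total: int) -> str:
    # Guard against ZeroDivisionError when total is 0
    return f"{(part / total * 100):.1f}%" if total > 0 else "0.0%"

assert pct(3, 4) == "75.0%"
assert pct(3, 0) == "0.0%"
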
@@ -2818,7 +2835,11 @@ def generate_ens_report(

        # Build the PDF
        logger.info("Building PDF...")
        doc.build(elements, onFirstPage=_add_pdf_footer, onLaterPages=_add_pdf_footer)
        doc.build(
            elements,
            onFirstPage=partial(_add_pdf_footer, compliance_name=compliance_name),
            onLaterPages=partial(_add_pdf_footer, compliance_name=compliance_name),
        )
    except Exception as e:
        tb_lineno = e.__traceback__.tb_lineno if e.__traceback__ else "unknown"
        logger.error(f"Error building ENS report, line {tb_lineno} -- {e}")
@@ -3365,7 +3386,11 @@ def generate_nis2_report(

        # Build the PDF
        logger.info("Building NIS2 PDF...")
        doc.build(elements, onFirstPage=_add_pdf_footer, onLaterPages=_add_pdf_footer)
        doc.build(
            elements,
            onFirstPage=partial(_add_pdf_footer, compliance_name=compliance_name),
            onLaterPages=partial(_add_pdf_footer, compliance_name=compliance_name),
        )
        logger.info(f"NIS2 report successfully generated at {output_path}")

    except Exception as e:

@@ -8,11 +8,12 @@ from collections import defaultdict
from datetime import datetime, timezone
from typing import Any

import sentry_sdk
from celery.utils.log import get_task_logger
from config.env import env
from config.settings.celery import CELERY_DEADLOCK_ATTEMPTS
from django.db import IntegrityError, OperationalError
from django.db.models import Case, Count, IntegerField, Prefetch, Sum, When
from django.db.models import Case, Count, IntegerField, Prefetch, Q, Sum, When
from tasks.utils import CustomEncoder

from api.compliance import PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE
@@ -26,8 +27,10 @@ from api.db_utils import (
)
from api.exceptions import ProviderConnectionError
from api.models import (
    AttackSurfaceOverview,
    ComplianceOverviewSummary,
    ComplianceRequirementOverview,
    DailySeveritySummary,
    Finding,
    MuteRule,
    Processor,
@@ -37,12 +40,14 @@ from api.models import (
    ResourceScanSummary,
    ResourceTag,
    Scan,
    ScanCategorySummary,
    ScanSummary,
    StateChoices,
)
from api.models import StatusChoices as FindingStatus
from api.utils import initialize_prowler_provider, return_prowler_provider
from api.v1.serializers import ScanTaskSerializer
from prowler.lib.check.models import CheckMetadata
from prowler.lib.outputs.finding import Finding as ProwlerFinding
from prowler.lib.scan.scan import Scan as ProwlerScan

@@ -74,6 +79,77 @@ FINDINGS_MICRO_BATCH_SIZE = env.int("DJANGO_FINDINGS_MICRO_BATCH_SIZE", default=
# Controls how many rows each ORM bulk_create/bulk_update call sends to Postgres
SCAN_DB_BATCH_SIZE = env.int("DJANGO_SCAN_DB_BATCH_SIZE", default=500)

ATTACK_SURFACE_PROVIDER_COMPATIBILITY = {
    "internet-exposed": None,  # Compatible with all providers
    "secrets": None,  # Compatible with all providers
    "privilege-escalation": ["aws", "kubernetes"],
    "ec2-imdsv1": ["aws"],
}

_ATTACK_SURFACE_MAPPING_CACHE: dict[str, dict] = {}

def aggregate_category_counts(
    categories: list[str],
    severity: str,
    status: str,
    delta: str | None,
    muted: bool,
    cache: dict[tuple[str, str], dict[str, int]],
) -> None:
    """
    Increment category counters in-place for a finding.

    Args:
        categories: List of categories from finding metadata.
        severity: Severity level (e.g., "high", "medium").
        status: Finding status as string ("FAIL", "PASS").
        delta: Delta value as string ("new", "changed") or None.
        muted: Whether the finding is muted.
        cache: Dict {(category, severity): {"total", "failed", "new_failed"}} to update.
    """
    is_failed = status == "FAIL" and not muted
    is_new_failed = is_failed and delta == "new"

    for cat in categories:
        key = (cat, severity)
        if key not in cache:
            cache[key] = {"total": 0, "failed": 0, "new_failed": 0}
        if not muted:
            cache[key]["total"] += 1
        if is_failed:
            cache[key]["failed"] += 1
        if is_new_failed:
            cache[key]["new_failed"] += 1

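A quick illustration of the counter semantics above (values chosen for the example):

cache: dict[tuple[str, str], dict[str, int]] = {}
aggregate_category_counts(
    categories=["secrets"], severity="high", status="FAIL",
    delta="new", muted=False, cache=cache,
)
# A new, unmuted FAIL counts toward all three buckets
assert cache[("secrets", "high")] == {"total": 1, "failed": 1, "new_failed": 1}
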
def _get_attack_surface_mapping_from_provider(provider_type: str) -> dict:
    global _ATTACK_SURFACE_MAPPING_CACHE

    if provider_type in _ATTACK_SURFACE_MAPPING_CACHE:
        return _ATTACK_SURFACE_MAPPING_CACHE[provider_type]

    attack_surface_check_mappings = {
        "internet-exposed": None,
        "secrets": None,
        "privilege-escalation": {
            "iam_policy_allows_privilege_escalation",
            "iam_inline_policy_allows_privilege_escalation",
        },
        "ec2-imdsv1": {
            "ec2_instance_imdsv2_enabled"
        },  # AWS only - IMDSv1 enabled findings
    }
    for category_name, check_ids in attack_surface_check_mappings.items():
        if check_ids is None:
            sdk_check_ids = CheckMetadata.list(
                provider=provider_type, category=category_name
            )
            attack_surface_check_mappings[category_name] = sdk_check_ids

    _ATTACK_SURFACE_MAPPING_CACHE[provider_type] = attack_surface_check_mappings
    return attack_surface_check_mappings

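The module-level cache makes repeated lookups cheap within a worker process; a minimal sketch of the memoization behavior:

m1 = _get_attack_surface_mapping_from_provider("aws")  # resolves via CheckMetadata.list
m2 = _get_attack_surface_mapping_from_provider("aws")  # served from the module cache
assert m1 is m2  # the same dict instance is returned on subsequent calls
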
def _create_finding_delta(
    last_status: FindingStatus | None | str, new_status: FindingStatus | None
@@ -330,7 +406,7 @@ def _create_compliance_summaries(
    if summary_objects:
        with rls_transaction(tenant_id):
            ComplianceOverviewSummary.objects.bulk_create(
                summary_objects, batch_size=500
                summary_objects, batch_size=500, ignore_conflicts=True
            )


@@ -357,6 +433,7 @@ def _process_finding_micro_batch(
    unique_resources: set,
    scan_resource_cache: set,
    mute_rules_cache: dict,
    scan_categories_cache: dict[tuple[str, str], dict[str, int]],
) -> None:
    """
    Process a micro-batch of findings and persist them using bulk operations.
@@ -377,6 +454,7 @@ def _process_finding_micro_batch(
        unique_resources: Set tracking (uid, region) pairs seen in the scan.
        scan_resource_cache: Set of tuples used to create `ResourceScanSummary` rows.
        mute_rules_cache: Map of finding UID -> mute reason gathered before the scan.
        scan_categories_cache: Dict tracking category counts {(category, severity): {"total", "failed", "new_failed"}}.
    """
    # Accumulate objects for bulk operations
    findings_to_create = []
@@ -532,11 +610,12 @@ def _process_finding_micro_batch(
            resource_failed_findings_cache[resource_uid] += 1

        # Create finding object (don't save yet)
        check_metadata = finding.get_metadata()
        finding_instance = Finding(
            tenant_id=tenant_id,
            uid=finding_uid,
            delta=delta,
            check_metadata=finding.get_metadata(),
            check_metadata=check_metadata,
            status=status,
            status_extended=finding.status_extended,
            severity=finding.severity,
@@ -549,6 +628,7 @@ def _process_finding_micro_batch(
            muted_at=datetime.now(tz=timezone.utc) if is_muted else None,
            muted_reason=muted_reason,
            compliance=finding.compliance,
            categories=check_metadata.get("categories", []) or [],
        )
        findings_to_create.append(finding_instance)
        resource_denormalized_data.append((finding_instance, resource_instance))
@@ -563,6 +643,16 @@ def _process_finding_micro_batch(
            )
        )

        # Track categories with counts for ScanCategorySummary by (category, severity)
        aggregate_category_counts(
            categories=check_metadata.get("categories", []) or [],
            severity=finding.severity.value,
            status=status.value,
            delta=delta.value if delta else None,
            muted=is_muted,
            cache=scan_categories_cache,
        )

    # Bulk operations within single transaction
    with rls_transaction(tenant_id):
        # Bulk create findings
@@ -662,6 +752,7 @@ def perform_prowler_scan(
    exception = None
    unique_resources = set()
    scan_resource_cache: set[tuple[str, str, str, str]] = set()
    scan_categories_cache: dict[tuple[str, str], dict[str, int]] = {}
    start_time = time.time()
    exc = None

@@ -751,6 +842,7 @@ def perform_prowler_scan(
                    unique_resources=unique_resources,
                    scan_resource_cache=scan_resource_cache,
                    mute_rules_cache=mute_rules_cache,
                    scan_categories_cache=scan_categories_cache,
                )

                # Update scan progress
@@ -810,13 +902,33 @@ def perform_prowler_scan(
                resource_scan_summaries, batch_size=500, ignore_conflicts=True
            )
    except Exception as filter_exception:
        import sentry_sdk

        sentry_sdk.capture_exception(filter_exception)
        logger.error(
            f"Error storing filter values for scan {scan_id}: {filter_exception}"
        )

    try:
        if scan_categories_cache:
            category_summaries = [
                ScanCategorySummary(
                    tenant_id=tenant_id,
                    scan_id=scan_id,
                    category=category,
                    severity=severity,
                    total_findings=counts["total"],
                    failed_findings=counts["failed"],
                    new_failed_findings=counts["new_failed"],
                )
                for (category, severity), counts in scan_categories_cache.items()
            ]
            with rls_transaction(tenant_id):
                ScanCategorySummary.objects.bulk_create(
                    category_summaries, batch_size=500, ignore_conflicts=True
                )
    except Exception as cat_exception:
        sentry_sdk.capture_exception(cat_exception)
        logger.error(f"Error storing categories for scan {scan_id}: {cat_exception}")

    serializer = ScanTaskSerializer(instance=scan_instance)
    return serializer.data

@@ -979,11 +1091,14 @@ def _aggregate_findings_by_region(
    findings_count_by_compliance = {}

    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        # Fetch findings with resources in a single efficient query
        # Use select_related for finding fields and prefetch_related for many-to-many resources
        # Fetch only PASS/FAIL findings (optimized query reduces data transfer)
        # Other statuses are not needed for check_status or ThreatScore calculation
        findings = (
            Finding.all_objects.filter(
                tenant_id=tenant_id, scan_id=scan_id, muted=False
                tenant_id=tenant_id,
                scan_id=scan_id,
                muted=False,
                status__in=["PASS", "FAIL"],
            )
            .only("id", "check_id", "status", "compliance")
            .prefetch_related(
@@ -1001,6 +1116,8 @@ def _aggregate_findings_by_region(
        )

        for finding in findings:
            status = finding.status

            for resource in finding.small_resources:
                region = resource.region

@@ -1008,7 +1125,7 @@ def _aggregate_findings_by_region(
                current_status = check_status_by_region.setdefault(region, {})
                # Priority: FAIL > any other status
                if current_status.get(finding.check_id) != "FAIL":
                    current_status[finding.check_id] = finding.status
                    current_status[finding.check_id] = status

            # Aggregate ThreatScore compliance counts
            if modeled_threatscore_compliance_id in (finding.compliance or {}):
@@ -1023,7 +1140,7 @@ def _aggregate_findings_by_region(
                    requirement_id, {"total": 0, "pass": 0}
                )
                requirement_stats["total"] += 1
                if finding.status == "PASS":
                if status == "PASS":
                    requirement_stats["pass"] += 1

    return check_status_by_region, findings_count_by_compliance
@@ -1191,3 +1308,184 @@ def create_compliance_requirements(tenant_id: str, scan_id: str):
    except Exception as e:
        logger.error(f"Error creating compliance requirements for scan {scan_id}: {e}")
        raise e

def aggregate_attack_surface(tenant_id: str, scan_id: str):
    """
    Aggregate findings into attack surface overview records.

    Creates one AttackSurfaceOverview record per attack surface type
    for the given scan, based on check_id mappings.

    Args:
        tenant_id: Tenant that owns the scan.
        scan_id: Scan UUID whose findings should be aggregated.
    """
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        scan_instance = Scan.all_objects.select_related("provider").get(pk=scan_id)
        provider_type = scan_instance.provider.provider

    provider_attack_surface_mapping = _get_attack_surface_mapping_from_provider(
        provider_type=provider_type
    )

    # Filter out attack surfaces that are not compatible or have no resolved check IDs
    supported_mappings: dict[str, list[str]] = {}
    for attack_surface_type, check_ids in provider_attack_surface_mapping.items():
        compatible_providers = ATTACK_SURFACE_PROVIDER_COMPATIBILITY.get(
            attack_surface_type
        )
        if (
            compatible_providers is not None
            and provider_type not in compatible_providers
        ):
            logger.info(
                f"Skipping {attack_surface_type} - not supported for {provider_type}"
            )
            continue

        if not check_ids:
            logger.info(
                f"Skipping {attack_surface_type} - no check IDs resolved for {provider_type}"
            )
            continue

        supported_mappings[attack_surface_type] = list(check_ids)

    if not supported_mappings:
        logger.info(
            f"No attack surface mappings available for scan {scan_id} and provider {provider_type}"
        )
        logger.info(f"No attack surface overview records created for scan {scan_id}")
        return

    # Map every check_id to its attack surface, so we can aggregate with a single query
    check_id_to_surface: dict[str, str] = {}
    for attack_surface_type, check_ids in supported_mappings.items():
        for check_id in check_ids:
            check_id_to_surface[check_id] = attack_surface_type

    aggregated_counts = {
        attack_surface_type: {"total": 0, "failed": 0, "muted": 0}
        for attack_surface_type in supported_mappings.keys()
    }

    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        finding_stats = (
            Finding.all_objects.filter(
                tenant_id=tenant_id,
                scan_id=scan_id,
                check_id__in=list(check_id_to_surface.keys()),
            )
            .values("check_id")
            .annotate(
                total=Count("id"),
                failed=Count("id", filter=Q(status="FAIL", muted=False)),
                muted=Count("id", filter=Q(status="FAIL", muted=True)),
            )
        )

        for stats in finding_stats:
            attack_surface_type = check_id_to_surface.get(stats["check_id"])
            if not attack_surface_type:
                continue

            aggregated_counts[attack_surface_type]["total"] += stats["total"] or 0
            aggregated_counts[attack_surface_type]["failed"] += stats["failed"] or 0
            aggregated_counts[attack_surface_type]["muted"] += stats["muted"] or 0

    overview_objects = []
    for attack_surface_type, counts in aggregated_counts.items():
        total = counts["total"]
        if not total:
            continue

        overview_objects.append(
            AttackSurfaceOverview(
                tenant_id=tenant_id,
                scan_id=scan_id,
                attack_surface_type=attack_surface_type,
                total_findings=total,
                failed_findings=counts["failed"],
                muted_failed_findings=counts["muted"],
            )
        )

    # Bulk create overview records
    if overview_objects:
        with rls_transaction(tenant_id):
            AttackSurfaceOverview.objects.bulk_create(overview_objects, batch_size=500)
        logger.info(
            f"Created {len(overview_objects)} attack surface overview records for scan {scan_id}"
        )
    else:
        logger.info(f"No attack surface overview records created for scan {scan_id}")

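The filter= argument on Count in the annotate(...) above compiles to a SQL filtered aggregate, so all three counters come back from a single GROUP BY query; roughly, on Postgres (SQL paraphrased for illustration):

# SELECT check_id,
#        COUNT(id)                                               AS total,
#        COUNT(id) FILTER (WHERE status = 'FAIL' AND NOT muted)  AS failed,
#        COUNT(id) FILTER (WHERE status = 'FAIL' AND muted)      AS muted
# FROM findings
# WHERE tenant_id = ... AND scan_id = ... AND check_id IN (...)
# GROUP BY check_id;
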
def aggregate_daily_severity(tenant_id: str, scan_id: str):
    """Aggregate scan severity counts into DailySeveritySummary (one record per provider/day)."""
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        scan = Scan.objects.filter(
            tenant_id=tenant_id,
            id=scan_id,
            state=StateChoices.COMPLETED,
        ).first()

        if not scan:
            logger.warning(f"Scan {scan_id} not found or not completed")
            return {"status": "scan is not completed"}

        provider_id = scan.provider_id
        scan_date = scan.completed_at.date()

        severity_totals = (
            ScanSummary.objects.filter(
                tenant_id=tenant_id,
                scan_id=scan_id,
            )
            .values("severity")
            .annotate(total_fail=Sum("fail"), total_muted=Sum("muted"))
        )

        severity_data = {
            "critical": 0,
            "high": 0,
            "medium": 0,
            "low": 0,
            "informational": 0,
            "muted": 0,
        }

        for row in severity_totals:
            severity = row["severity"]
            if severity in severity_data:
                severity_data[severity] = row["total_fail"] or 0
            severity_data["muted"] += row["total_muted"] or 0

    with rls_transaction(tenant_id):
        summary, created = DailySeveritySummary.objects.update_or_create(
            tenant_id=tenant_id,
            provider_id=provider_id,
            date=scan_date,
            defaults={
                "scan_id": scan_id,
                "critical": severity_data["critical"],
                "high": severity_data["high"],
                "medium": severity_data["medium"],
                "low": severity_data["low"],
                "informational": severity_data["informational"],
                "muted": severity_data["muted"],
            },
        )

    action = "created" if created else "updated"
    logger.info(
        f"Daily severity summary {action} for provider {provider_id} on {scan_date}"
    )

    return {
        "status": action,
        "provider_id": str(provider_id),
        "date": str(scan_date),
        "severity_data": severity_data,
    }

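update_or_create keys the row on (tenant, provider, date), so re-running the task for a later scan on the same day overwrites the summary rather than duplicating it; a minimal sketch of the idempotent upsert shape (field values illustrative):

summary, created = DailySeveritySummary.objects.update_or_create(
    tenant_id=tenant_id,
    provider_id=provider_id,
    date=scan_date,                       # lookup fields identify the row
    defaults={"critical": 3, "high": 7},  # defaults are written either way
)
# created is True on the first run for that provider/day, False afterwards
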
@@ -10,7 +10,9 @@ from config.django.base import DJANGO_FINDINGS_BATCH_SIZE, DJANGO_TMP_OUTPUT_DIR
from django_celery_beat.models import PeriodicTask
from tasks.jobs.backfill import (
    backfill_compliance_summaries,
    backfill_daily_severity_summaries,
    backfill_resource_scan_summaries,
    backfill_scan_category_summaries,
)
from tasks.jobs.connection import (
    check_integration_connection,
@@ -37,6 +39,8 @@ from tasks.jobs.lighthouse_providers import (
from tasks.jobs.muting import mute_historical_findings
from tasks.jobs.report import generate_compliance_reports_job
from tasks.jobs.scan import (
    aggregate_attack_surface,
    aggregate_daily_severity,
    aggregate_findings,
    create_compliance_requirements,
    perform_prowler_scan,
@@ -46,7 +50,7 @@ from tasks.utils import batched, get_next_execution_datetime
from api.compliance import get_compliance_frameworks
from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.decorators import set_tenant
from api.decorators import handle_provider_deletion, set_tenant
from api.models import Finding, Integration, Provider, Scan, ScanSummary, StateChoices
from api.utils import initialize_prowler_provider
from api.v1.serializers import ScanTaskSerializer
@@ -69,10 +73,16 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
    create_compliance_requirements_task.apply_async(
        kwargs={"tenant_id": tenant_id, "scan_id": scan_id}
    )
    aggregate_attack_surface_task.apply_async(
        kwargs={"tenant_id": tenant_id, "scan_id": scan_id}
    )
    chain(
        perform_scan_summary_task.si(tenant_id=tenant_id, scan_id=scan_id),
        generate_outputs_task.si(
            scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
        group(
            aggregate_daily_severity_task.si(tenant_id=tenant_id, scan_id=scan_id),
            generate_outputs_task.si(
                scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
            ),
        ),
        group(
            # Use optimized task that generates both reports with shared queries
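The chain(...) above sequences the summary task before a group(...) of independent follow-ups, which Celery fans out in parallel once the preceding link finishes; a minimal sketch of the canvas shape (task names illustrative):

from celery import chain, group

workflow = chain(
    summarize.si(scan_id=scan_id),        # runs first
    group(                                # then these two run in parallel
        daily_severity.si(scan_id=scan_id),
        generate_outputs.si(scan_id=scan_id),
    ),
)
workflow.apply_async()
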
@@ -140,6 +150,7 @@ def delete_provider_task(provider_id: str, tenant_id: str):


@shared_task(base=RLSTask, name="scan-perform", queue="scans")
@handle_provider_deletion
def perform_scan_task(
    tenant_id: str, scan_id: str, provider_id: str, checks_to_execute: list[str] = None
):
@@ -172,6 +183,7 @@ def perform_scan_task(


@shared_task(base=RLSTask, bind=True, name="scan-perform-scheduled", queue="scans")
@handle_provider_deletion
def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
    """
    Task to perform a scheduled Prowler scan on a given provider.
@@ -277,6 +289,7 @@ def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):


@shared_task(name="scan-summary", queue="overview")
@handle_provider_deletion
def perform_scan_summary_task(tenant_id: str, scan_id: str):
    return aggregate_findings(tenant_id=tenant_id, scan_id=scan_id)

@@ -292,6 +305,7 @@ def delete_tenant_task(tenant_id: str):
    queue="scan-reports",
)
@set_tenant(keep_tenant=True)
@handle_provider_deletion
def generate_outputs_task(scan_id: str, provider_id: str, tenant_id: str):
    """
    Process findings in batches and generate output files in multiple formats.
@@ -487,6 +501,7 @@ def generate_outputs_task(scan_id: str, provider_id: str, tenant_id: str):


@shared_task(name="backfill-scan-resource-summaries", queue="backfill")
@handle_provider_deletion
def backfill_scan_resource_summaries_task(tenant_id: str, scan_id: str):
    """
    Tries to backfill the resource scan summaries table for a given scan.
@@ -499,6 +514,7 @@ def backfill_scan_resource_summaries_task(tenant_id: str, scan_id: str):


@shared_task(name="backfill-compliance-summaries", queue="backfill")
@handle_provider_deletion
def backfill_compliance_summaries_task(tenant_id: str, scan_id: str):
    """
    Tries to backfill compliance overview summaries for a completed scan.
@@ -513,7 +529,29 @@ def backfill_compliance_summaries_task(tenant_id: str, scan_id: str):
    return backfill_compliance_summaries(tenant_id=tenant_id, scan_id=scan_id)


@shared_task(name="backfill-daily-severity-summaries", queue="backfill")
def backfill_daily_severity_summaries_task(tenant_id: str, days: int = None):
    """Backfill DailySeveritySummary from historical scans. Use the days param to limit scope."""
    return backfill_daily_severity_summaries(tenant_id=tenant_id, days=days)


@shared_task(name="backfill-scan-category-summaries", queue="backfill")
@handle_provider_deletion
def backfill_scan_category_summaries_task(tenant_id: str, scan_id: str):
    """
    Backfill ScanCategorySummary for a completed scan.

    Aggregates unique categories from findings and creates a summary row.

    Args:
        tenant_id (str): The tenant identifier.
        scan_id (str): The scan identifier.
    """
    return backfill_scan_category_summaries(tenant_id=tenant_id, scan_id=scan_id)


@shared_task(base=RLSTask, name="scan-compliance-overviews", queue="compliance")
@handle_provider_deletion
def create_compliance_requirements_task(tenant_id: str, scan_id: str):
    """
    Creates detailed compliance requirement records for a scan.
@@ -529,6 +567,29 @@ def create_compliance_requirements_task(tenant_id: str, scan_id: str):
    return create_compliance_requirements(tenant_id=tenant_id, scan_id=scan_id)


@shared_task(name="scan-attack-surface-overviews", queue="overview")
@handle_provider_deletion
def aggregate_attack_surface_task(tenant_id: str, scan_id: str):
    """
    Creates attack surface overview records for a scan.

    This task processes findings and aggregates them into attack surface categories
    (internet-exposed, secrets, privilege-escalation, ec2-imdsv1) for quick overview queries.

    Args:
        tenant_id (str): The tenant ID for which to create records.
        scan_id (str): The ID of the scan for which to create records.
    """
    return aggregate_attack_surface(tenant_id=tenant_id, scan_id=scan_id)


@shared_task(name="scan-daily-severity", queue="overview")
@handle_provider_deletion
def aggregate_daily_severity_task(tenant_id: str, scan_id: str):
    """Aggregate scan severity into DailySeveritySummary for the findings_severity/timeseries endpoint."""
    return aggregate_daily_severity(tenant_id=tenant_id, scan_id=scan_id)


@shared_task(base=RLSTask, name="lighthouse-connection-check")
@set_tenant
def check_lighthouse_connection_task(lighthouse_config_id: str, tenant_id: str = None):
@@ -567,6 +628,7 @@ def refresh_lighthouse_provider_models_task(


@shared_task(name="integration-check")
@handle_provider_deletion
def check_integrations_task(tenant_id: str, provider_id: str, scan_id: str = None):
    """
    Check and execute all configured integrations for a provider.
@@ -631,6 +693,7 @@ def check_integrations_task(tenant_id: str, provider_id: str, scan_id: str = Non
    name="integration-s3",
    queue="integrations",
)
@handle_provider_deletion
def s3_integration_task(
    tenant_id: str,
    provider_id: str,
@@ -690,6 +753,7 @@ def jira_integration_task(
    name="scan-compliance-reports",
    queue="scan-reports",
)
@handle_provider_deletion
def generate_compliance_reports_task(tenant_id: str, scan_id: str, provider_id: str):
    """
    Optimized task to generate ThreatScore, ENS, and NIS2 reports with shared queries.

@@ -4,14 +4,19 @@ import pytest
from tasks.jobs.backfill import (
    backfill_compliance_summaries,
    backfill_resource_scan_summaries,
    backfill_scan_category_summaries,
)

from api.models import (
    ComplianceOverviewSummary,
    Finding,
    ResourceScanSummary,
    Scan,
    ScanCategorySummary,
    StateChoices,
)
from prowler.lib.check.models import Severity
from prowler.lib.outputs.finding import Status


@pytest.fixture(scope="function")
@@ -46,6 +51,45 @@ def get_not_completed_scans(providers_fixture):
    return scan_1, scan_2


@pytest.fixture(scope="function")
def findings_with_categories_fixture(scans_fixture, resources_fixture):
    scan = scans_fixture[0]
    resource = resources_fixture[0]

    finding = Finding.objects.create(
        tenant_id=scan.tenant_id,
        uid="finding_with_categories",
        scan=scan,
        delta="new",
        status=Status.FAIL,
        status_extended="test status",
        impact=Severity.critical,
        impact_extended="test impact",
        severity=Severity.critical,
        raw_result={"status": Status.FAIL},
        check_id="test_check",
        check_metadata={"CheckId": "test_check"},
        categories=["gen-ai", "security"],
        first_seen_at="2024-01-02T00:00:00Z",
    )
    finding.add_resources([resource])
    return finding


@pytest.fixture(scope="function")
def scan_category_summary_fixture(scans_fixture):
    scan = scans_fixture[0]
    return ScanCategorySummary.objects.create(
        tenant_id=scan.tenant_id,
        scan=scan,
        category="existing-category",
        severity=Severity.critical,
        total_findings=1,
        failed_findings=0,
        new_failed_findings=0,
    )


@pytest.mark.django_db
class TestBackfillResourceScanSummaries:
    def test_already_backfilled(self, resource_scan_summary_data):
@@ -172,3 +216,47 @@ class TestBackfillComplianceSummaries:
        assert summary.requirements_failed == expected_counts["requirements_failed"]
        assert summary.requirements_manual == expected_counts["requirements_manual"]
        assert summary.total_requirements == expected_counts["total_requirements"]


@pytest.mark.django_db
class TestBackfillScanCategorySummaries:
    def test_already_backfilled(self, scan_category_summary_fixture):
        tenant_id = scan_category_summary_fixture.tenant_id
        scan_id = scan_category_summary_fixture.scan_id

        result = backfill_scan_category_summaries(str(tenant_id), str(scan_id))

        assert result == {"status": "already backfilled"}

    def test_not_completed_scan(self, get_not_completed_scans):
        for scan in get_not_completed_scans:
            result = backfill_scan_category_summaries(str(scan.tenant_id), str(scan.id))
            assert result == {"status": "scan is not completed"}

    def test_no_categories_to_backfill(self, scans_fixture):
        scan = scans_fixture[1]  # Failed scan with no findings
        result = backfill_scan_category_summaries(str(scan.tenant_id), str(scan.id))
        assert result == {"status": "no categories to backfill"}

    def test_successful_backfill(self, findings_with_categories_fixture):
        finding = findings_with_categories_fixture
        tenant_id = str(finding.tenant_id)
        scan_id = str(finding.scan_id)

        result = backfill_scan_category_summaries(tenant_id, scan_id)

        # 2 categories × 1 severity = 2 rows
        assert result == {"status": "backfilled", "categories_count": 2}

        summaries = ScanCategorySummary.objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        )
        assert summaries.count() == 2
        categories = set(summaries.values_list("category", flat=True))
        assert categories == {"gen-ai", "security"}

        for summary in summaries:
            assert summary.severity == Severity.critical
            assert summary.total_findings == 1
            assert summary.failed_findings == 1
            assert summary.new_failed_findings == 1

@@ -28,6 +28,7 @@ class TestScheduleProviderScan:
                "tenant_id": str(provider_instance.tenant_id),
                "provider_id": str(provider_instance.id),
            },
            countdown=5,
        )

        task_name = f"scan-perform-scheduled-{provider_instance.id}"

@@ -9,14 +9,18 @@ from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from tasks.jobs.scan import (
|
||||
_ATTACK_SURFACE_MAPPING_CACHE,
|
||||
_aggregate_findings_by_region,
|
||||
_copy_compliance_requirement_rows,
|
||||
_create_compliance_summaries,
|
||||
_create_finding_delta,
|
||||
_get_attack_surface_mapping_from_provider,
|
||||
_normalized_compliance_key,
|
||||
_persist_compliance_requirement_rows,
|
||||
_process_finding_micro_batch,
|
||||
_store_resources,
|
||||
aggregate_attack_surface,
|
||||
aggregate_category_counts,
|
||||
aggregate_findings,
|
||||
create_compliance_requirements,
|
||||
perform_prowler_scan,
|
||||
@@ -1374,6 +1378,7 @@ class TestProcessFindingMicroBatch:
|
||||
unique_resources: set[tuple[str, str]] = set()
|
||||
scan_resource_cache: set[tuple[str, str, str, str]] = set()
|
||||
mute_rules_cache = {}
|
||||
scan_categories_cache: dict[tuple[str, str], dict[str, int]] = {}
|
||||
|
||||
with (
|
||||
patch("tasks.jobs.scan.rls_transaction", new=noop_rls_transaction),
|
||||
@@ -1391,6 +1396,7 @@ class TestProcessFindingMicroBatch:
|
||||
unique_resources,
|
||||
scan_resource_cache,
|
||||
mute_rules_cache,
|
||||
scan_categories_cache,
|
||||
)
|
||||
|
||||
created_finding = Finding.objects.get(uid=finding.uid)
|
||||
@@ -1483,6 +1489,7 @@ class TestProcessFindingMicroBatch:
|
||||
unique_resources: set[tuple[str, str]] = set()
|
||||
scan_resource_cache: set[tuple[str, str, str, str]] = set()
|
||||
mute_rules_cache = {finding.uid: "Muted via rule"}
|
||||
scan_categories_cache: dict[tuple[str, str], dict[str, int]] = {}
|
||||
|
||||
with (
|
||||
patch("tasks.jobs.scan.rls_transaction", new=noop_rls_transaction),
|
||||
@@ -1500,6 +1507,7 @@ class TestProcessFindingMicroBatch:
|
||||
unique_resources,
|
||||
scan_resource_cache,
|
||||
mute_rules_cache,
|
||||
scan_categories_cache,
|
||||
)
|
||||
|
||||
existing_resource.refresh_from_db()
|
||||
@@ -1607,6 +1615,7 @@ class TestProcessFindingMicroBatch:
|
||||
unique_resources: set[tuple[str, str]] = set()
|
||||
scan_resource_cache: set[tuple[str, str, str, str]] = set()
|
||||
mute_rules_cache = {}
|
||||
scan_categories_cache: dict[tuple[str, str], dict[str, int]] = {}
|
||||
|
||||
with (
|
||||
patch("tasks.jobs.scan.rls_transaction", new=noop_rls_transaction),
|
||||
@@ -1625,6 +1634,7 @@ class TestProcessFindingMicroBatch:
|
||||
unique_resources,
|
||||
scan_resource_cache,
|
||||
mute_rules_cache,
|
||||
scan_categories_cache,
|
||||
)
|
||||
|
||||
# Verify the long UID finding was NOT created
|
||||
@@ -1645,6 +1655,118 @@ class TestProcessFindingMicroBatch:
|
||||
for call in warning_calls
|
||||
)
|
||||
|
||||
def test_process_finding_micro_batch_tracks_categories(
|
||||
self, tenants_fixture, scans_fixture
|
||||
):
|
||||
tenant = tenants_fixture[0]
|
||||
scan = scans_fixture[0]
|
||||
provider = scan.provider
|
||||
|
||||
finding1 = FakeFinding(
|
||||
uid="finding-cat-1",
|
||||
status=StatusChoices.PASS,
|
||||
status_extended="all good",
|
||||
severity=Severity.low,
|
||||
check_id="genai_check",
|
||||
resource_uid="arn:aws:bedrock:::model/test",
|
||||
resource_name="test-model",
|
||||
region="us-east-1",
|
||||
service_name="bedrock",
|
||||
resource_type="model",
|
||||
resource_tags={},
|
||||
resource_metadata={},
|
||||
resource_details={},
|
||||
partition="aws",
|
||||
raw={},
|
||||
compliance={},
|
||||
metadata={"categories": ["gen-ai", "security"]},
|
||||
muted=False,
|
||||
)
|
||||
|
||||
finding2 = FakeFinding(
|
||||
uid="finding-cat-2",
|
||||
status=StatusChoices.FAIL,
|
||||
status_extended="bad",
|
||||
severity=Severity.high,
|
||||
check_id="iam_check",
|
||||
resource_uid="arn:aws:iam:::user/test",
|
||||
resource_name="test-user",
|
||||
region="us-east-1",
|
||||
service_name="iam",
|
||||
resource_type="user",
|
||||
resource_tags={},
|
||||
resource_metadata={},
|
||||
resource_details={},
|
||||
partition="aws",
|
||||
raw={},
|
||||
compliance={},
|
||||
metadata={"categories": ["security", "iam"]},
|
||||
muted=False,
|
||||
)
|
||||
|
||||
resource_cache = {}
|
||||
tag_cache = {}
|
||||
last_status_cache = {}
|
||||
resource_failed_findings_cache = {}
|
||||
unique_resources: set[tuple[str, str]] = set()
|
||||
scan_resource_cache: set[tuple[str, str, str, str]] = set()
|
||||
mute_rules_cache = {}
|
||||
scan_categories_cache: dict[tuple[str, str], dict[str, int]] = {}
|
||||
|
||||
with (
|
||||
patch("tasks.jobs.scan.rls_transaction", new=noop_rls_transaction),
|
||||
patch("api.db_utils.rls_transaction", new=noop_rls_transaction),
|
||||
):
|
||||
_process_finding_micro_batch(
|
||||
str(tenant.id),
|
||||
[finding1, finding2],
|
||||
scan,
|
||||
provider,
|
||||
resource_cache,
|
||||
tag_cache,
|
||||
last_status_cache,
|
||||
resource_failed_findings_cache,
|
||||
unique_resources,
|
||||
scan_resource_cache,
|
||||
mute_rules_cache,
|
||||
scan_categories_cache,
|
||||
)
|
||||
|
||||
# finding1: PASS, severity=low, categories=["gen-ai", "security"]
|
||||
# finding2: FAIL, severity=high, categories=["security", "iam"]
|
||||
# Keys are (category, severity) tuples
|
||||
assert set(scan_categories_cache.keys()) == {
|
||||
("gen-ai", "low"),
|
||||
("security", "low"),
|
||||
("security", "high"),
|
||||
("iam", "high"),
|
||||
}
|
||||
assert scan_categories_cache[("gen-ai", "low")] == {
|
||||
"total": 1,
|
||||
"failed": 0,
|
||||
"new_failed": 0,
|
||||
}
|
||||
assert scan_categories_cache[("security", "low")] == {
|
||||
"total": 1,
|
||||
"failed": 0,
|
||||
"new_failed": 0,
|
||||
}
|
||||
assert scan_categories_cache[("security", "high")] == {
|
||||
"total": 1,
|
||||
"failed": 1,
|
||||
"new_failed": 1,
|
||||
}
|
||||
assert scan_categories_cache[("iam", "high")] == {
|
||||
"total": 1,
|
||||
"failed": 1,
|
||||
"new_failed": 1,
|
||||
}
|
||||
|
||||
created_finding1 = Finding.objects.get(uid="finding-cat-1")
|
||||
created_finding2 = Finding.objects.get(uid="finding-cat-2")
|
||||
assert set(created_finding1.categories) == {"gen-ai", "security"}
|
||||
assert set(created_finding2.categories) == {"security", "iam"}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestCreateComplianceRequirements:
|
||||
@@ -3338,7 +3460,10 @@ class TestAggregateFindingsByRegion:
|
||||

        # Verify filter was called with muted=False
        mock_findings_filter.assert_called_once_with(
-            tenant_id=tenant_id, scan_id=scan_id, muted=False
+            tenant_id=tenant_id,
+            scan_id=scan_id,
+            muted=False,
+            status__in=["PASS", "FAIL"],
        )

    @patch("tasks.jobs.scan.Finding.all_objects.filter")
@@ -3471,3 +3596,429 @@ class TestAggregateFindingsByRegion:

        assert check_status_by_region == {}
        assert findings_count_by_compliance == {}


@pytest.mark.django_db
class TestAggregateAttackSurface:
    """Test aggregate_attack_surface function and related caching."""

    def setup_method(self):
        """Clear cache before each test."""
        _ATTACK_SURFACE_MAPPING_CACHE.clear()

    def teardown_method(self):
        """Clear cache after each test."""
        _ATTACK_SURFACE_MAPPING_CACHE.clear()

    @patch("tasks.jobs.scan.CheckMetadata.list")
    def test_get_attack_surface_mapping_caches_result(self, mock_check_metadata_list):
        """Test that _get_attack_surface_mapping_from_provider caches results."""
        mock_check_metadata_list.return_value = {"check_internet_exposed_1"}

        # First call should hit CheckMetadata.list
        result1 = _get_attack_surface_mapping_from_provider("aws")
        assert mock_check_metadata_list.call_count == 2  # internet-exposed, secrets

        # Second call should use cache
        result2 = _get_attack_surface_mapping_from_provider("aws")
        assert mock_check_metadata_list.call_count == 2  # No additional calls

        assert result1 is result2
        assert "aws" in _ATTACK_SURFACE_MAPPING_CACHE

    @patch("tasks.jobs.scan.CheckMetadata.list")
    def test_get_attack_surface_mapping_different_providers(
        self, mock_check_metadata_list
    ):
        """Test caching works independently for different providers."""
        mock_check_metadata_list.return_value = {"check_1"}

        _get_attack_surface_mapping_from_provider("aws")
        aws_call_count = mock_check_metadata_list.call_count

        _get_attack_surface_mapping_from_provider("gcp")
        gcp_call_count = mock_check_metadata_list.call_count

        # Both providers should have made calls
        assert gcp_call_count > aws_call_count
        assert "aws" in _ATTACK_SURFACE_MAPPING_CACHE
        assert "gcp" in _ATTACK_SURFACE_MAPPING_CACHE

    @patch("tasks.jobs.scan.CheckMetadata.list")
    def test_get_attack_surface_mapping_returns_hardcoded_checks(
        self, mock_check_metadata_list
    ):
        """Test that hardcoded check IDs are returned for privilege-escalation and ec2-imdsv1."""
        mock_check_metadata_list.return_value = set()

        result = _get_attack_surface_mapping_from_provider("aws")

        # Hardcoded checks should be present
        assert (
            "iam_policy_allows_privilege_escalation" in result["privilege-escalation"]
        )
        assert (
            "iam_inline_policy_allows_privilege_escalation"
            in result["privilege-escalation"]
        )
        assert "ec2_instance_imdsv2_enabled" in result["ec2-imdsv1"]

    @patch("tasks.jobs.scan.AttackSurfaceOverview.objects.bulk_create")
    @patch("tasks.jobs.scan.Finding.all_objects.filter")
    @patch("tasks.jobs.scan._get_attack_surface_mapping_from_provider")
    @patch("tasks.jobs.scan.rls_transaction")
    def test_aggregate_attack_surface_creates_overview_records(
        self,
        mock_rls_transaction,
        mock_get_mapping,
        mock_findings_filter,
        mock_bulk_create,
        tenants_fixture,
        scans_fixture,
    ):
        """Test that aggregate_attack_surface creates AttackSurfaceOverview records."""
        tenant = tenants_fixture[0]
        scan = scans_fixture[0]
        scan.provider.provider = "aws"
        scan.provider.save()

        mock_get_mapping.return_value = {
            "internet-exposed": {"check_internet_1", "check_internet_2"},
            "secrets": {"check_secrets_1"},
            "privilege-escalation": {"check_privesc_1"},
            "ec2-imdsv1": {"check_imdsv1_1"},
        }

        # Mock findings aggregation
        mock_queryset = MagicMock()
        mock_queryset.values.return_value = mock_queryset
        mock_queryset.annotate.return_value = [
            {"check_id": "check_internet_1", "total": 10, "failed": 3, "muted": 1},
            {"check_id": "check_secrets_1", "total": 5, "failed": 2, "muted": 0},
        ]

        ctx = MagicMock()
        ctx.__enter__.return_value = None
        ctx.__exit__.return_value = False
        mock_rls_transaction.return_value = ctx
        mock_findings_filter.return_value = mock_queryset

        aggregate_attack_surface(str(tenant.id), str(scan.id))

        mock_bulk_create.assert_called_once()
        args, kwargs = mock_bulk_create.call_args
        objects = args[0]

        # Should create records for internet-exposed and secrets (the ones with findings)
        assert len(objects) == 2
        assert kwargs["batch_size"] == 500

    @patch("tasks.jobs.scan.AttackSurfaceOverview.objects.bulk_create")
    @patch("tasks.jobs.scan.Finding.all_objects.filter")
    @patch("tasks.jobs.scan._get_attack_surface_mapping_from_provider")
    @patch("tasks.jobs.scan.rls_transaction")
    def test_aggregate_attack_surface_skips_unsupported_provider(
        self,
        mock_rls_transaction,
        mock_get_mapping,
        mock_findings_filter,
        mock_bulk_create,
        tenants_fixture,
        scans_fixture,
    ):
        """Test that ec2-imdsv1 is skipped for non-AWS providers."""
        tenant = tenants_fixture[0]
        scan = scans_fixture[0]
        scan.provider.provider = "gcp"
        scan.provider.uid = "gcp-test-project-id"
        scan.provider.save()

        mock_get_mapping.return_value = {
            "internet-exposed": {"check_internet_1"},
            "secrets": {"check_secrets_1"},
            "privilege-escalation": set(),  # Not supported for GCP
            "ec2-imdsv1": {"check_imdsv1_1"},  # Should be skipped for GCP
        }

        mock_queryset = MagicMock()
        mock_queryset.values.return_value = mock_queryset
        mock_queryset.annotate.return_value = [
            {"check_id": "check_internet_1", "total": 5, "failed": 1, "muted": 0},
        ]

        ctx = MagicMock()
        ctx.__enter__.return_value = None
        ctx.__exit__.return_value = False
        mock_rls_transaction.return_value = ctx
        mock_findings_filter.return_value = mock_queryset

        aggregate_attack_surface(str(tenant.id), str(scan.id))

        # ec2-imdsv1 check_ids should not be in the filter
        filter_call = mock_findings_filter.call_args
        check_ids_in_filter = filter_call[1]["check_id__in"]
        assert "check_imdsv1_1" not in check_ids_in_filter

    @patch("tasks.jobs.scan.AttackSurfaceOverview.objects.bulk_create")
    @patch("tasks.jobs.scan.Finding.all_objects.filter")
    @patch("tasks.jobs.scan._get_attack_surface_mapping_from_provider")
    @patch("tasks.jobs.scan.rls_transaction")
    def test_aggregate_attack_surface_no_findings(
        self,
        mock_rls_transaction,
        mock_get_mapping,
        mock_findings_filter,
        mock_bulk_create,
        tenants_fixture,
        scans_fixture,
    ):
        """Test that no records are created when there are no findings."""
        tenant = tenants_fixture[0]
        scan = scans_fixture[0]

        mock_get_mapping.return_value = {
            "internet-exposed": {"check_1"},
            "secrets": {"check_2"},
            "privilege-escalation": set(),
            "ec2-imdsv1": set(),
        }

        mock_queryset = MagicMock()
        mock_queryset.values.return_value = mock_queryset
        mock_queryset.annotate.return_value = []  # No findings

        ctx = MagicMock()
        ctx.__enter__.return_value = None
        ctx.__exit__.return_value = False
        mock_rls_transaction.return_value = ctx
        mock_findings_filter.return_value = mock_queryset

        aggregate_attack_surface(str(tenant.id), str(scan.id))

        mock_bulk_create.assert_not_called()

    @patch("tasks.jobs.scan.AttackSurfaceOverview.objects.bulk_create")
    @patch("tasks.jobs.scan.Finding.all_objects.filter")
    @patch("tasks.jobs.scan._get_attack_surface_mapping_from_provider")
    @patch("tasks.jobs.scan.rls_transaction")
    def test_aggregate_attack_surface_aggregates_counts_correctly(
        self,
        mock_rls_transaction,
        mock_get_mapping,
        mock_findings_filter,
        mock_bulk_create,
        tenants_fixture,
        scans_fixture,
    ):
        """Test that counts from multiple check_ids are aggregated per attack surface type."""
        tenant = tenants_fixture[0]
        scan = scans_fixture[0]
        scan.provider.provider = "aws"
        scan.provider.save()

        mock_get_mapping.return_value = {
            "internet-exposed": {"check_internet_1", "check_internet_2"},
            "secrets": set(),
            "privilege-escalation": set(),
            "ec2-imdsv1": set(),
        }

        mock_queryset = MagicMock()
        mock_queryset.values.return_value = mock_queryset
        mock_queryset.annotate.return_value = [
            {"check_id": "check_internet_1", "total": 10, "failed": 3, "muted": 1},
            {"check_id": "check_internet_2", "total": 5, "failed": 2, "muted": 0},
        ]

        ctx = MagicMock()
        ctx.__enter__.return_value = None
        ctx.__exit__.return_value = False
        mock_rls_transaction.return_value = ctx
        mock_findings_filter.return_value = mock_queryset

        aggregate_attack_surface(str(tenant.id), str(scan.id))

        args, kwargs = mock_bulk_create.call_args
        objects = args[0]

        assert len(objects) == 1
        overview = objects[0]
        assert overview.attack_surface_type == "internet-exposed"
        assert overview.total_findings == 15  # 10 + 5
        assert overview.failed_findings == 5  # 3 + 2
        assert overview.muted_failed_findings == 1  # 1 + 0

    @patch("tasks.jobs.scan.Scan.all_objects.select_related")
    @patch("tasks.jobs.scan.rls_transaction")
    def test_aggregate_attack_surface_uses_select_related(
        self, mock_rls_transaction, mock_select_related, tenants_fixture, scans_fixture
    ):
        """Test that select_related is used to avoid N+1 query."""
        tenant = tenants_fixture[0]
        scan = scans_fixture[0]

        mock_scan = MagicMock()
        mock_scan.provider.provider = "aws"

        mock_select_related.return_value.get.return_value = mock_scan

        ctx = MagicMock()
        ctx.__enter__.return_value = None
        ctx.__exit__.return_value = False
        mock_rls_transaction.return_value = ctx

        with patch(
            "tasks.jobs.scan._get_attack_surface_mapping_from_provider"
        ) as mock_map:
            mock_map.return_value = {}

            aggregate_attack_surface(str(tenant.id), str(scan.id))

        mock_select_related.assert_called_once_with("provider")


class TestAggregateCategoryCounts:
    """Test aggregate_category_counts helper function."""

    def test_aggregate_category_counts_basic(self):
        """Test basic category counting for a non-muted PASS finding."""
        cache: dict[tuple[str, str], dict[str, int]] = {}
        aggregate_category_counts(
            categories=["security", "iam"],
            severity="high",
            status="PASS",
            delta=None,
            muted=False,
            cache=cache,
        )

        assert ("security", "high") in cache
        assert ("iam", "high") in cache
        assert cache[("security", "high")] == {"total": 1, "failed": 0, "new_failed": 0}
        assert cache[("iam", "high")] == {"total": 1, "failed": 0, "new_failed": 0}

    def test_aggregate_category_counts_fail_not_muted(self):
        """Test category counting for a non-muted FAIL finding."""
        cache: dict[tuple[str, str], dict[str, int]] = {}
        aggregate_category_counts(
            categories=["security"],
            severity="critical",
            status="FAIL",
            delta=None,
            muted=False,
            cache=cache,
        )

        assert cache[("security", "critical")] == {
            "total": 1,
            "failed": 1,
            "new_failed": 0,
        }

    def test_aggregate_category_counts_new_fail(self):
        """Test category counting for a new FAIL finding (delta='new')."""
        cache: dict[tuple[str, str], dict[str, int]] = {}
        aggregate_category_counts(
            categories=["gen-ai"],
            severity="high",
            status="FAIL",
            delta="new",
            muted=False,
            cache=cache,
        )

        assert cache[("gen-ai", "high")] == {"total": 1, "failed": 1, "new_failed": 1}

    def test_aggregate_category_counts_muted_finding(self):
        """Test that muted findings are excluded from all counts."""
        cache: dict[tuple[str, str], dict[str, int]] = {}
        aggregate_category_counts(
            categories=["security"],
            severity="high",
            status="FAIL",
            delta="new",
            muted=True,
            cache=cache,
        )

        assert cache[("security", "high")] == {"total": 0, "failed": 0, "new_failed": 0}

    def test_aggregate_category_counts_accumulates(self):
        """Test that multiple calls accumulate counts."""
        cache: dict[tuple[str, str], dict[str, int]] = {}

        # First finding: PASS
        aggregate_category_counts(
            categories=["security"],
            severity="high",
            status="PASS",
            delta=None,
            muted=False,
            cache=cache,
        )

        # Second finding: FAIL (new)
        aggregate_category_counts(
            categories=["security"],
            severity="high",
            status="FAIL",
            delta="new",
            muted=False,
            cache=cache,
        )

        # Third finding: FAIL (changed)
        aggregate_category_counts(
            categories=["security"],
            severity="high",
            status="FAIL",
            delta="changed",
            muted=False,
            cache=cache,
        )

        assert cache[("security", "high")] == {"total": 3, "failed": 2, "new_failed": 1}

    def test_aggregate_category_counts_empty_categories(self):
        """Test with empty categories list."""
        cache: dict[tuple[str, str], dict[str, int]] = {}
        aggregate_category_counts(
            categories=[],
            severity="high",
            status="FAIL",
            delta="new",
            muted=False,
            cache=cache,
        )

        assert cache == {}

    def test_aggregate_category_counts_changed_delta(self):
        """Test that changed delta increments failed but not new_failed."""
        cache: dict[tuple[str, str], dict[str, int]] = {}
        aggregate_category_counts(
            categories=["iam"],
            severity="medium",
            status="FAIL",
            delta="changed",
            muted=False,
            cache=cache,
        )

        assert cache[("iam", "medium")] == {"total": 1, "failed": 1, "new_failed": 0}

    def test_aggregate_category_counts_multiple_categories_single_finding(self):
        """Test single finding with multiple categories."""
        cache: dict[tuple[str, str], dict[str, int]] = {}
        aggregate_category_counts(
            categories=["security", "compliance", "data-protection"],
            severity="low",
            status="FAIL",
            delta="new",
            muted=False,
            cache=cache,
        )

        assert len(cache) == 3
        for cat in ["security", "compliance", "data-protection"]:
            assert cache[(cat, "low")] == {"total": 1, "failed": 1, "new_failed": 1}

@@ -4,6 +4,10 @@ from unittest.mock import MagicMock, patch

import openai
import pytest
from botocore.exceptions import ClientError
from tasks.jobs.lighthouse_providers import (
    _create_bedrock_client,
    _extract_bedrock_credentials,
)
from tasks.tasks import (
    _perform_scan_complete_tasks,
    check_integrations_task,
@@ -21,6 +25,198 @@ from api.models import (
)


@pytest.mark.django_db
class TestExtractBedrockCredentials:
    """Unit tests for _extract_bedrock_credentials helper function."""

    def test_extract_access_key_credentials(self, tenants_fixture):
        """Test extraction of access key + secret key credentials."""
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            is_active=True,
        )
        provider_cfg.credentials_decoded = {
            "access_key_id": "AKIAIOSFODNN7EXAMPLE",
            "secret_access_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
            "region": "us-east-1",
        }
        provider_cfg.save()

        result = _extract_bedrock_credentials(provider_cfg)

        assert result is not None
        assert result["access_key_id"] == "AKIAIOSFODNN7EXAMPLE"
        assert result["secret_access_key"] == "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
        assert result["region"] == "us-east-1"
        assert "api_key" not in result

    def test_extract_api_key_credentials(self, tenants_fixture):
        """Test extraction of API key (bearer token) credentials."""
        valid_api_key = "ABSKQmVkcm9ja0FQSUtleS" + ("A" * 110)
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            is_active=True,
        )
        provider_cfg.credentials_decoded = {
            "api_key": valid_api_key,
            "region": "us-west-2",
        }
        provider_cfg.save()

        result = _extract_bedrock_credentials(provider_cfg)

        assert result is not None
        assert result["api_key"] == valid_api_key
        assert result["region"] == "us-west-2"
        assert "access_key_id" not in result
        assert "secret_access_key" not in result

    def test_api_key_takes_precedence_over_access_keys(self, tenants_fixture):
        """Test that API key is preferred when both auth methods are present."""
        valid_api_key = "ABSKQmVkcm9ja0FQSUtleS" + ("B" * 110)
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            is_active=True,
        )
        provider_cfg.credentials_decoded = {
            "api_key": valid_api_key,
            "access_key_id": "AKIAIOSFODNN7EXAMPLE",
            "secret_access_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
            "region": "eu-west-1",
        }
        provider_cfg.save()

        result = _extract_bedrock_credentials(provider_cfg)

        assert result is not None
        assert result["api_key"] == valid_api_key
        assert result["region"] == "eu-west-1"
        assert "access_key_id" not in result

    def test_missing_region_returns_none(self, tenants_fixture):
        """Test that missing region returns None."""
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            is_active=True,
        )
        provider_cfg.credentials_decoded = {
            "api_key": "ABSKQmVkcm9ja0FQSUtleS" + ("A" * 110),
        }
        provider_cfg.save()

        result = _extract_bedrock_credentials(provider_cfg)

        assert result is None

    def test_empty_credentials_returns_none(self, tenants_fixture):
        """Test that empty credentials dict returns None (region only is not enough)."""
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            is_active=True,
        )
        # Only region, no auth credentials - should return None
        provider_cfg.credentials_decoded = {
            "region": "us-east-1",
        }
        provider_cfg.save()

        result = _extract_bedrock_credentials(provider_cfg)

        assert result is None

    def test_non_dict_credentials_returns_none(self, tenants_fixture):
        """Test that non-dict credentials returns None."""
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            is_active=True,
        )
        # Store valid credentials first to pass model validation
        provider_cfg.credentials_decoded = {
            "api_key": "ABSKQmVkcm9ja0FQSUtleS" + ("A" * 110),
            "region": "us-east-1",
        }
        provider_cfg.save()

        # Mock the credentials_decoded property to return a non-dict value
        # This simulates corrupted/invalid stored data
        with patch.object(
            type(provider_cfg),
            "credentials_decoded",
            new_callable=lambda: property(lambda self: "invalid"),
        ):
            result = _extract_bedrock_credentials(provider_cfg)

        assert result is None


class TestCreateBedrockClient:
    """Unit tests for _create_bedrock_client helper function."""

    @patch("tasks.jobs.lighthouse_providers.boto3.client")
    def test_create_client_with_access_keys(self, mock_boto_client):
        """Test creating client with access key authentication."""
        mock_client = MagicMock()
        mock_boto_client.return_value = mock_client

        creds = {
            "access_key_id": "AKIAIOSFODNN7EXAMPLE",
            "secret_access_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
            "region": "us-east-1",
        }

        result = _create_bedrock_client(creds)

        assert result == mock_client
        mock_boto_client.assert_called_once_with(
            service_name="bedrock",
            region_name="us-east-1",
            aws_access_key_id="AKIAIOSFODNN7EXAMPLE",
            aws_secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
        )

    @patch("tasks.jobs.lighthouse_providers.Config")
    @patch("tasks.jobs.lighthouse_providers.boto3.client")
    def test_create_client_with_api_key(self, mock_boto_client, mock_config):
        """Test creating client with API key authentication."""
        mock_client = MagicMock()
        mock_events = MagicMock()
        mock_client.meta.events = mock_events
        mock_boto_client.return_value = mock_client
        mock_config_instance = MagicMock()
        mock_config.return_value = mock_config_instance
        valid_api_key = "ABSKQmVkcm9ja0FQSUtleS" + ("A" * 110)

        creds = {
            "api_key": valid_api_key,
            "region": "us-west-2",
        }

        result = _create_bedrock_client(creds)

        assert result == mock_client
        mock_boto_client.assert_called_once_with(
            service_name="bedrock",
            region_name="us-west-2",
            config=mock_config_instance,
        )
        mock_events.register.assert_called_once()
        call_args = mock_events.register.call_args
        assert call_args[0][0] == "before-send.*.*"

        # Verify handler injects bearer token
        handler_fn = call_args[0][1]
        mock_request = MagicMock()
        mock_request.headers = {}
        handler_fn(mock_request)
        assert mock_request.headers["Authorization"] == f"Bearer {valid_api_key}"


# TODO Move this to outputs/reports jobs
@pytest.mark.django_db
class TestGenerateOutputs:
@@ -529,6 +725,7 @@ class TestGenerateOutputs:


class TestScanCompleteTasks:
    @patch("tasks.tasks.aggregate_attack_surface_task.apply_async")
    @patch("tasks.tasks.create_compliance_requirements_task.apply_async")
    @patch("tasks.tasks.perform_scan_summary_task.si")
    @patch("tasks.tasks.generate_outputs_task.si")
@@ -541,6 +738,7 @@ class TestScanCompleteTasks:
        mock_outputs_task,
        mock_scan_summary_task,
        mock_compliance_requirements_task,
        mock_attack_surface_task,
    ):
        """Test that scan complete tasks are properly orchestrated with optimized reports."""
        _perform_scan_complete_tasks("tenant-id", "scan-id", "provider-id")
@@ -550,6 +748,11 @@ class TestScanCompleteTasks:
            kwargs={"tenant_id": "tenant-id", "scan_id": "scan-id"},
        )

        # Verify attack surface task is called
        mock_attack_surface_task.assert_called_once_with(
            kwargs={"tenant_id": "tenant-id", "scan_id": "scan-id"},
        )

        # Verify scan summary task is called
        mock_scan_summary_task.assert_called_once_with(
            scan_id="scan-id",
@@ -1145,6 +1348,16 @@ class TestCheckLighthouseProviderConnectionTask:
            None,
            {"connected": True, "error": None},
        ),
        # Bedrock API key authentication
        (
            LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            {
                "api_key": "ABSKQmVkcm9ja0FQSUtleS" + ("A" * 110),
                "region": "us-east-1",
            },
            None,
            {"connected": True, "error": None},
        ),
    ],
)
def test_check_connection_success_all_providers(
@@ -1213,6 +1426,24 @@ class TestCheckLighthouseProviderConnectionTask:
                "list_foundation_models",
            ),
        ),
        # Bedrock API key authentication failure
        (
            LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            {
                "api_key": "ABSKQmVkcm9ja0FQSUtleS" + ("X" * 110),
                "region": "us-east-1",
            },
            None,
            ClientError(
                {
                    "Error": {
                        "Code": "UnrecognizedClientException",
                        "Message": "Invalid API key",
                    }
                },
                "list_foundation_models",
            ),
        ),
    ],
)
def test_check_connection_api_failure(
@@ -1337,6 +1568,17 @@ class TestRefreshLighthouseProviderModelsTask:
            {"openai.gpt-oss-120b-1:0": "gpt-oss-120b"},
            1,
        ),
        # Bedrock API key authentication
        (
            LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
            {
                "api_key": "ABSKQmVkcm9ja0FQSUtleS" + ("A" * 110),
                "region": "us-east-1",
            },
            None,
            {"anthropic.claude-v3": "Claude 3"},
            1,
        ),
    ],
)
def test_refresh_models_create_new(

(binary image file added, 90 KiB)
@@ -0,0 +1,24 @@

import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
@@ -61,6 +61,7 @@ def create_layout_overview(
        html.Div(className="flex", id="gcp_card", n_clicks=0),
        html.Div(className="flex", id="k8s_card", n_clicks=0),
        html.Div(className="flex", id="m365_card", n_clicks=0),
        html.Div(className="flex", id="alibabacloud_card", n_clicks=0),
    ],
    className=f"grid gap-x-4 mb-[30px] sm:grid-cols-2 lg:grid-cols-{amount_providers}",
),

@@ -78,6 +78,8 @@ def load_csv_files(csv_files):
            result = result.replace("_KUBERNETES", " - KUBERNETES")
        if "M65" in result:
            result = result.replace("_M65", " - M65")
        if "ALIBABACLOUD" in result:
            result = result.replace("_ALIBABACLOUD", " - ALIBABACLOUD")
        results.append(result)

    unique_results = set(results)
@@ -125,7 +127,7 @@ if data is None:
    )
else:

-    data["ASSESSMENTDATE"] = pd.to_datetime(data["ASSESSMENTDATE"])
+    data["ASSESSMENTDATE"] = pd.to_datetime(data["ASSESSMENTDATE"], format="mixed")
    data["ASSESSMENT_TIME"] = data["ASSESSMENTDATE"].dt.strftime("%Y-%m-%d %H:%M:%S")

    data_values = data["ASSESSMENT_TIME"].unique()
@@ -278,9 +280,13 @@ def display_data(
        data["REQUIREMENTS_ATTRIBUTES_PROFILE"] = data[
            "REQUIREMENTS_ATTRIBUTES_PROFILE"
        ].apply(lambda x: x.split(" - ")[0])

    # Rename the column LOCATION to REGION for Alibaba Cloud
    if "alibabacloud" in analytics_input:
        data = data.rename(columns={"LOCATION": "REGION"})
    # Filter the chosen level of the CIS
    if is_level_1:
-        data = data[data["REQUIREMENTS_ATTRIBUTES_PROFILE"] == "Level 1"]
+        data = data[data["REQUIREMENTS_ATTRIBUTES_PROFILE"].str.contains("Level 1")]

    # Rename the column PROJECTID to ACCOUNTID for GCP
    if data.columns.str.contains("PROJECTID").any():

@@ -79,6 +79,9 @@ ks8_provider_logo = html.Img(
m365_provider_logo = html.Img(
    src="assets/images/providers/m365_provider.png", alt="m365 provider"
)
alibabacloud_provider_logo = html.Img(
    src="assets/images/providers/alibabacloud_provider.png", alt="alibabacloud provider"
)


def load_csv_files(csv_files):
@@ -253,6 +256,8 @@ else:
            accounts.append(account + " - AWS")
        if "kubernetes" in list(data[data["ACCOUNT_UID"] == account]["PROVIDER"]):
            accounts.append(account + " - K8S")
        if "alibabacloud" in list(data[data["ACCOUNT_UID"] == account]["PROVIDER"]):
            accounts.append(account + " - ALIBABACLOUD")

    account_dropdown = create_account_dropdown(accounts)

@@ -298,6 +303,8 @@ else:
            services.append(service + " - GCP")
        if "m365" in list(data[data["SERVICE_NAME"] == service]["PROVIDER"]):
            services.append(service + " - M365")
        if "alibabacloud" in list(data[data["SERVICE_NAME"] == service]["PROVIDER"]):
            services.append(service + " - ALIBABACLOUD")

    services = ["All"] + services
    services = [
@@ -520,6 +527,7 @@ else:
        Output("gcp_card", "children"),
        Output("k8s_card", "children"),
        Output("m365_card", "children"),
        Output("alibabacloud_card", "children"),
        Output("subscribe_card", "children"),
        Output("info-file-over", "title"),
        Output("severity-filter", "value"),
@@ -537,6 +545,7 @@ else:
        Output("gcp_card", "n_clicks"),
        Output("k8s_card", "n_clicks"),
        Output("m365_card", "n_clicks"),
        Output("alibabacloud_card", "n_clicks"),
    ],
    Input("cloud-account-filter", "value"),
    Input("region-filter", "value"),
@@ -560,6 +569,7 @@ else:
    Input("sort_button_region", "n_clicks"),
    Input("sort_button_service", "n_clicks"),
    Input("sort_button_account", "n_clicks"),
    Input("alibabacloud_card", "n_clicks"),
)
def filter_data(
    cloud_account_values,
@@ -584,6 +594,7 @@ def filter_data(
    sort_button_region,
    sort_button_service,
    sort_button_account,
    alibabacloud_clicks,
):
    # Use n_clicks for vulture
    n_clicks_csv = n_clicks_csv
@@ -599,6 +610,7 @@ def filter_data(
        gcp_clicks = 0
        k8s_clicks = 0
        m365_clicks = 0
        alibabacloud_clicks = 0
    if azure_clicks > 0:
        filtered_data = data.copy()
        if azure_clicks % 2 != 0 and "azure" in list(data["PROVIDER"]):
@@ -607,6 +619,7 @@ def filter_data(
        gcp_clicks = 0
        k8s_clicks = 0
        m365_clicks = 0
        alibabacloud_clicks = 0
    if gcp_clicks > 0:
        filtered_data = data.copy()
        if gcp_clicks % 2 != 0 and "gcp" in list(data["PROVIDER"]):
@@ -615,6 +628,7 @@ def filter_data(
        azure_clicks = 0
        k8s_clicks = 0
        m365_clicks = 0
        alibabacloud_clicks = 0
    if k8s_clicks > 0:
        filtered_data = data.copy()
        if k8s_clicks % 2 != 0 and "kubernetes" in list(data["PROVIDER"]):
@@ -623,6 +637,7 @@ def filter_data(
        azure_clicks = 0
        gcp_clicks = 0
        m365_clicks = 0
        alibabacloud_clicks = 0
    if m365_clicks > 0:
        filtered_data = data.copy()
        if m365_clicks % 2 != 0 and "m365" in list(data["PROVIDER"]):
@@ -631,7 +646,16 @@ def filter_data(
        azure_clicks = 0
        gcp_clicks = 0
        k8s_clicks = 0

        alibabacloud_clicks = 0
    if alibabacloud_clicks > 0:
        filtered_data = data.copy()
        if alibabacloud_clicks % 2 != 0 and "alibabacloud" in list(data["PROVIDER"]):
            filtered_data = filtered_data[filtered_data["PROVIDER"] == "alibabacloud"]
        aws_clicks = 0
        azure_clicks = 0
        gcp_clicks = 0
        k8s_clicks = 0
        m365_clicks = 0
    # For all the data, we will add to the status column the value 'MUTED (FAIL)' and 'MUTED (PASS)' depending on the value of the column 'STATUS' and 'MUTED'
    if "MUTED" in filtered_data.columns:
        filtered_data["STATUS"] = filtered_data.apply(
@@ -723,6 +747,8 @@ def filter_data(
            all_account_ids.append(account)
        if "kubernetes" in list(data[data["ACCOUNT_UID"] == account]["PROVIDER"]):
            all_account_ids.append(account)
        if "alibabacloud" in list(data[data["ACCOUNT_UID"] == account]["PROVIDER"]):
            all_account_ids.append(account)

    all_account_names = []
    if "ACCOUNT_NAME" in filtered_data.columns:
@@ -745,6 +771,10 @@ def filter_data(
            cloud_accounts_options.append(item + " - AWS")
        if "kubernetes" in list(data[data["ACCOUNT_UID"] == item]["PROVIDER"]):
            cloud_accounts_options.append(item + " - K8S")
        if "alibabacloud" in list(
            data[data["ACCOUNT_UID"] == item]["PROVIDER"]
        ):
            cloud_accounts_options.append(item + " - ALIBABACLOUD")
        if "ACCOUNT_NAME" in filtered_data.columns:
            if "azure" in list(data[data["ACCOUNT_NAME"] == item]["PROVIDER"]):
                cloud_accounts_options.append(item + " - AZURE")
@@ -873,6 +903,10 @@ def filter_data(
            filtered_data[filtered_data["SERVICE_NAME"] == item]["PROVIDER"]
        ):
            service_filter_options.append(item + " - M365")
        if "alibabacloud" in list(
            filtered_data[filtered_data["SERVICE_NAME"] == item]["PROVIDER"]
        ):
            service_filter_options.append(item + " - ALIBABACLOUD")

    # Filter Service
    if service_values == ["All"]:
@@ -1324,6 +1358,12 @@ def filter_data(
                filtered_data.loc[
                    filtered_data["ACCOUNT_UID"] == account, "ACCOUNT_UID"
                ] = (account + " - M365")
            if "alibabacloud" in list(
                data[data["ACCOUNT_UID"] == account]["PROVIDER"]
            ):
                filtered_data.loc[
                    filtered_data["ACCOUNT_UID"] == account, "ACCOUNT_UID"
                ] = (account + " - ALIBABACLOUD")

    table_collapsible = []
    for item in filtered_data.to_dict("records"):
@@ -1410,6 +1450,13 @@ def filter_data(
    else:
        m365_card = None

    if "alibabacloud" in list(data["PROVIDER"].unique()):
        alibabacloud_card = create_provider_card(
            "alibabacloud", alibabacloud_provider_logo, "Accounts", full_filtered_data
        )
    else:
        alibabacloud_card = None

    # Subscribe to Prowler Cloud card
    subscribe_card = [
        html.Div(
@@ -1454,6 +1501,7 @@ def filter_data(
            gcp_card,
            k8s_card,
            m365_card,
            alibabacloud_card,
            subscribe_card,
            list_files,
            severity_values,
@@ -1469,6 +1517,7 @@ def filter_data(
            gcp_clicks,
            k8s_clicks,
            m365_clicks,
            alibabacloud_clicks,
        )
    else:
        return (
@@ -1487,6 +1536,7 @@ def filter_data(
            gcp_card,
            k8s_card,
            m365_card,
            alibabacloud_card,
            subscribe_card,
            list_files,
            severity_values,
@@ -1504,6 +1554,7 @@ def filter_data(
            gcp_clicks,
            k8s_clicks,
            m365_clicks,
            alibabacloud_clicks,
        )


@@ -213,3 +213,5 @@ Also is important to keep all code examples as short as possible, including the
| software-supply-chain | Detects or prevents tampering, unauthorized packages, or third-party risks in software supply chain |
| e3 | M365-specific controls enabled by or dependent on an E3 license (e.g., baseline security policies, conditional access) |
| e5 | M365-specific controls enabled by or dependent on an E5 license (e.g., advanced threat protection, audit, DLP, and eDiscovery) |
| privilege-escalation | Detects IAM policies or permissions that allow identities to elevate their privileges beyond their intended scope, potentially gaining administrator or higher-level access through specific action combinations |
| ec2-imdsv1 | Identifies EC2 instances using Instance Metadata Service version 1 (IMDSv1), which is vulnerable to SSRF attacks and should be replaced with IMDSv2 for enhanced security |
@@ -0,0 +1,447 @@

---
title: 'Extending the MCP Server'
---

This guide explains how to extend the Prowler MCP Server with new tools and features.

<Info>
**New to Prowler MCP Server?** Start with the user documentation:
- [Overview](/getting-started/products/prowler-mcp) - Key capabilities, use cases, and deployment options
- [Installation](/getting-started/installation/prowler-mcp) - Install locally or use the managed server
- [Configuration](/getting-started/basic-usage/prowler-mcp) - Configure Claude Desktop, Cursor, and other MCP hosts
- [Tools Reference](/getting-started/basic-usage/prowler-mcp-tools) - Complete list of all available tools
</Info>

## Introduction

The Prowler MCP Server brings the entire Prowler ecosystem to AI assistants through the [Model Context Protocol (MCP)](https://modelcontextprotocol.io). It enables seamless integration with AI tools like Claude Desktop, Cursor, and other MCP clients.

The server follows a modular architecture with three independent sub-servers:

| Sub-Server | Auth Required | Description |
|------------|---------------|-------------|
| Prowler App | Yes | Full access to Prowler Cloud and Self-Managed features |
| Prowler Hub | No | Security checks catalog with **over 1000 checks**, fixers, and **70+ compliance frameworks** |
| Prowler Documentation | No | Full-text search and retrieval of official documentation |

<Note>
For a complete list of tools and their descriptions, see the [Tools Reference](/getting-started/basic-usage/prowler-mcp-tools).
</Note>

## Architecture Overview

The MCP Server architecture is illustrated in the [Overview documentation](/getting-started/products/prowler-mcp#mcp-server-architecture). AI assistants connect through the MCP protocol to access Prowler's three main components.

### Server Structure

The main server orchestrates three sub-servers with prefixed namespacing:

```
mcp_server/prowler_mcp_server/
├── server.py                 # Main orchestrator
├── main.py                   # CLI entry point
├── prowler_hub/
├── prowler_app/
│   ├── tools/                # Tool implementations
│   ├── models/               # Pydantic models
│   └── utils/                # API client, auth, loader
└── prowler_documentation/
```
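
The orchestrator in `server.py` composes these sub-servers under a shared namespace so tool names cannot collide. Below is a minimal sketch of what that composition could look like; the sub-server variable names and the exact FastMCP `mount` signature are assumptions (the composition API has varied across FastMCP releases), so treat it as illustrative rather than the actual `server.py`.

```python
# Illustrative sketch only: assumes FastMCP offers mount(server, prefix=...)
# and that each sub-package exports its FastMCP instance under these names.
from fastmcp import FastMCP

from prowler_mcp_server.prowler_hub.server import hub_mcp_server
from prowler_mcp_server.prowler_documentation.server import docs_mcp_server

main_mcp_server = FastMCP("prowler-mcp")

# Prefixed mounting namespaces every sub-server tool, so a Hub tool named
# `get_check` would be exposed to MCP clients as `prowler_hub_get_check`.
main_mcp_server.mount(hub_mcp_server, prefix="prowler_hub")
main_mcp_server.mount(docs_mcp_server, prefix="prowler_docs")
```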

### Tool Registration Patterns

The MCP Server uses two patterns for tool registration:

1. **Direct Decorators** (Prowler Hub/Docs): Tools are registered using `@mcp.tool()` decorators
2. **Auto-Discovery** (Prowler App): All public methods of `BaseTool` subclasses are auto-registered

## Adding Tools to Prowler App

### Step 1: Create the Tool Class

Create a new file or add to an existing file in `prowler_app/tools/`:

```python
# prowler_app/tools/new_feature.py
from typing import Any

from pydantic import Field

from prowler_mcp_server.prowler_app.models.new_feature import (
    FeatureListResponse,
    DetailedFeature,
)
from prowler_mcp_server.prowler_app.tools.base import BaseTool


class NewFeatureTools(BaseTool):
    """Tools for managing new features."""

    async def list_features(
        self,
        status: str | None = Field(
            default=None,
            description="Filter by status (active, inactive, pending)"
        ),
        page_size: int = Field(
            default=50,
            description="Number of results per page (1-100)"
        ),
    ) -> dict[str, Any]:
        """List all features with optional filtering.

        Returns a lightweight list of features optimized for LLM consumption.
        Use get_feature for complete information about a specific feature.
        """
        # Validate parameters
        self.api_client.validate_page_size(page_size)

        # Build query parameters
        params: dict[str, Any] = {"page[size]": page_size}
        if status:
            params["filter[status]"] = status

        # Make API request
        clean_params = self.api_client.build_filter_params(params)
        response = await self.api_client.get("/api/v1/features", params=clean_params)

        # Transform to LLM-friendly format
        return FeatureListResponse.from_api_response(response).model_dump()

    async def get_feature(
        self,
        feature_id: str = Field(description="The UUID of the feature"),
    ) -> dict[str, Any]:
        """Get detailed information about a specific feature.

        Returns complete feature details including configuration and metadata.
        """
        try:
            response = await self.api_client.get(f"/api/v1/features/{feature_id}")
            return DetailedFeature.from_api_response(response["data"]).model_dump()
        except Exception as e:
            self.logger.error(f"Failed to get feature {feature_id}: {e}")
            return {"error": str(e), "status": "failed"}
```

### Step 2: Create the Models

Create corresponding models in `prowler_app/models/`:

```python
# prowler_app/models/new_feature.py
from typing import Any

from pydantic import Field

from prowler_mcp_server.prowler_app.models.base import MinimalSerializerMixin


class SimplifiedFeature(MinimalSerializerMixin):
    """Lightweight feature for list operations."""

    id: str = Field(description="Unique feature identifier")
    name: str = Field(description="Feature name")
    status: str = Field(description="Current status")

    @classmethod
    def from_api_response(cls, data: dict[str, Any]) -> "SimplifiedFeature":
        """Transform API response to simplified format."""
        attributes = data.get("attributes", {})
        return cls(
            id=data["id"],
            name=attributes["name"],
            status=attributes["status"],
        )


class DetailedFeature(SimplifiedFeature):
    """Extended feature with complete details."""

    description: str | None = Field(default=None, description="Feature description")
    configuration: dict[str, Any] | None = Field(default=None, description="Configuration")
    created_at: str = Field(description="Creation timestamp")
    updated_at: str = Field(description="Last update timestamp")

    @classmethod
    def from_api_response(cls, data: dict[str, Any]) -> "DetailedFeature":
        """Transform API response to detailed format."""
        attributes = data.get("attributes", {})
        return cls(
            id=data["id"],
            name=attributes["name"],
            status=attributes["status"],
            description=attributes.get("description"),
            configuration=attributes.get("configuration"),
            created_at=attributes["created_at"],
            updated_at=attributes["updated_at"],
        )


class FeatureListResponse(MinimalSerializerMixin):
    """Response wrapper for feature list operations."""

    count: int = Field(description="Total number of features")
    features: list[SimplifiedFeature] = Field(description="List of features")

    @classmethod
    def from_api_response(cls, response: dict[str, Any]) -> "FeatureListResponse":
        """Transform API response to list format."""
        data = response.get("data", [])
        features = [SimplifiedFeature.from_api_response(item) for item in data]
        return cls(count=len(features), features=features)
```

### Step 3: Verify Auto-Discovery

No manual registration is needed. The `tool_loader.py` automatically discovers and registers all `BaseTool` subclasses. Verify your tool is loaded by checking the server logs:

```
INFO - Auto-registered 2 tools from NewFeatureTools
INFO - Loaded and registered: NewFeatureTools
```
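
Auto-discovery needs nothing more than the `BaseTool` subclass registry and a way to distinguish public coroutine methods from private helpers. The sketch below shows one way the loader could work; the function and variable names here are hypothetical, and the real `tool_loader.py` may differ.

```python
# Hypothetical sketch of BaseTool auto-discovery, not the actual loader.
import inspect
import logging

from fastmcp import FastMCP

from prowler_mcp_server.prowler_app.tools.base import BaseTool

logger = logging.getLogger(__name__)


def register_all_tools(app_mcp_server: FastMCP) -> None:
    for tool_cls in BaseTool.__subclasses__():
        instance = tool_cls()
        count = 0
        for name, method in inspect.getmembers(
            instance, predicate=inspect.iscoroutinefunction
        ):
            if name.startswith("_"):
                continue  # private helpers are not exposed as tools
            app_mcp_server.tool()(method)  # register the bound coroutine as a tool
            count += 1
        logger.info("Auto-registered %d tools from %s", count, tool_cls.__name__)
```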

## Adding Tools to Prowler Hub/Docs

For Prowler Hub or Documentation tools, use the `@mcp.tool()` decorator directly:

```python
# prowler_hub/server.py
from fastmcp import FastMCP

hub_mcp_server = FastMCP("prowler-hub")

@hub_mcp_server.tool()
async def get_new_artifact(
    artifact_id: str,
) -> dict:
    """Fetch a specific artifact from Prowler Hub.

    Args:
        artifact_id: The unique identifier of the artifact

    Returns:
        Dictionary containing artifact details
    """
    response = prowler_hub_client.get(f"/artifact/{artifact_id}")
    response.raise_for_status()
    return response.json()
```

## Model Design Patterns

### MinimalSerializerMixin

All models should use `MinimalSerializerMixin` to optimize responses for LLM consumption:

```python
from prowler_mcp_server.prowler_app.models.base import MinimalSerializerMixin

class MyModel(MinimalSerializerMixin):
    """Model that excludes empty values from serialization."""
    required_field: str
    optional_field: str | None = None  # Excluded if None
    empty_list: list = []  # Excluded if empty
```

This mixin automatically excludes:
- `None` values
- Empty strings
- Empty lists
- Empty dictionaries
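
The same behavior can be reproduced by filtering the serialized dictionary. The following is a hedged sketch of how the mixin might be implemented, not the actual code in `prowler_app/models/base.py`:

```python
# Illustrative sketch; the real MinimalSerializerMixin may differ.
from typing import Any

from pydantic import BaseModel


class MinimalSerializerMixin(BaseModel):
    def model_dump(self, **kwargs: Any) -> dict[str, Any]:
        data = super().model_dump(**kwargs)
        # Drop None, empty strings, empty lists, and empty dicts, while
        # keeping meaningful falsy values such as 0 and False.
        return {
            key: value
            for key, value in data.items()
            if value is not None and value != "" and value != [] and value != {}
        }
```

The explicit comparisons (rather than a plain truthiness check) are what keep values like `0` and `False` in the output.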

### Two-Tier Model Pattern

Use two-tier models for efficient responses:

- **Simplified**: Lightweight models for list operations
- **Detailed**: Extended models for single-item retrieval

```python
class SimplifiedItem(MinimalSerializerMixin):
    """Use for list operations - minimal fields."""
    id: str
    name: str
    status: str

class DetailedItem(SimplifiedItem):
    """Use for get operations - extends simplified with details."""
    description: str | None = None
    configuration: dict | None = None
    created_at: str
    updated_at: str
```

### Factory Method Pattern

Always implement `from_api_response()` for API transformation:

```python
@classmethod
def from_api_response(cls, data: dict[str, Any]) -> "MyModel":
    """Transform API response to model.

    This method handles the JSON:API format used by Prowler API,
    extracting attributes and relationships as needed.
    """
    attributes = data.get("attributes", {})
    return cls(
        id=data["id"],
        name=attributes["name"],
        # ... map other fields
    )
```

## API Client Usage

The `ProwlerAPIClient` is a singleton that handles authentication and HTTP requests:

```python
class MyTools(BaseTool):
    async def my_tool(self) -> dict:
        # GET request
        response = await self.api_client.get("/api/v1/endpoint", params={"key": "value"})

        # POST request
        response = await self.api_client.post(
            "/api/v1/endpoint",
            json_data={"data": {"type": "items", "attributes": {...}}}
        )

        # PATCH request
        response = await self.api_client.patch(
            f"/api/v1/endpoint/{id}",
            json_data={"data": {"attributes": {...}}}
        )

        # DELETE request
        response = await self.api_client.delete(f"/api/v1/endpoint/{id}")
```

### Helper Methods

The API client provides useful helper methods:

```python
# Validate page size (1-1000)
self.api_client.validate_page_size(page_size)

# Normalize date range with max days limit
date_range = self.api_client.normalize_date_range(date_from, date_to, max_days=2)

# Build filter parameters (handles type conversion)
clean_params = self.api_client.build_filter_params({
    "filter[status]": "active",
    "filter[severity__in]": ["high", "critical"],  # Converts to comma-separated
    "filter[muted]": True,  # Converts to "true"
})

# Poll async task until completion
result = await self.api_client.poll_task_until_complete(
    task_id=task_id,
    timeout=60,
    poll_interval=1.0
)
```

## Best Practices

### Tool Docstrings

Tool docstrings become the tool descriptions that the LLM reads. Provide clear usage instructions and common workflows:

```python
async def search_items(self, status: str = Field(...)) -> dict:
    """Search items with advanced filtering.

    Returns a lightweight list optimized for LLM consumption.
    Use get_item for complete details about a specific item.

    Common workflows:
    - Find critical items: status="critical"
    - Find recent items: Use date_from parameter
    """
```

### Error Handling

Return structured error responses instead of raising exceptions:

```python
async def get_item(self, item_id: str) -> dict:
    try:
        response = await self.api_client.get(f"/api/v1/items/{item_id}")
        return DetailedItem.from_api_response(response["data"]).model_dump()
    except Exception as e:
        self.logger.error(f"Failed to get item {item_id}: {e}")
        return {"error": str(e), "status": "failed"}
```

### Parameter Descriptions

Use Pydantic `Field()` with clear descriptions. This also helps LLMs understand the purpose of each parameter, so be as descriptive as possible:

```python
async def list_items(
    self,
    severity: list[str] = Field(
        default=[],
        description="Filter by severity levels (critical, high, medium, low)"
    ),
    status: str | None = Field(
        default=None,
        description="Filter by status (PASS, FAIL, MANUAL)"
    ),
    page_size: int = Field(
        default=50,
        description="Results per page"
    ),
) -> dict:
```

## Development Commands

```bash
# Navigate to MCP server directory
cd mcp_server

# Run in STDIO mode (default)
uv run prowler-mcp

# Run in HTTP mode
uv run prowler-mcp --transport http --host 0.0.0.0 --port 8000

# Run with environment variables
PROWLER_APP_API_KEY="pk_xxx" uv run prowler-mcp
```

For complete installation and deployment options, see:
- [Installation Guide](/getting-started/installation/prowler-mcp#from-source-development) - Development setup instructions
- [Configuration Guide](/getting-started/basic-usage/prowler-mcp) - MCP client configuration

For development, we recommend using the [Model Context Protocol Inspector](https://github.com/modelcontextprotocol/inspector) as an MCP client to test and debug your tools.

## Related Documentation

<CardGroup cols={2}>
  <Card title="MCP Server Overview" icon="circle-info" href="/getting-started/products/prowler-mcp">
    Key capabilities, use cases, and deployment options
  </Card>
  <Card title="Tools Reference" icon="wrench" href="/getting-started/basic-usage/prowler-mcp-tools">
    Complete reference of all available tools
  </Card>
  <Card title="Prowler Hub" icon="database" href="/getting-started/products/prowler-hub">
    Security checks and compliance frameworks catalog
  </Card>
  <Card title="Lighthouse AI" icon="robot" href="/getting-started/products/prowler-lighthouse-ai">
    AI-powered security analyst
  </Card>
</CardGroup>

## Additional Resources

- [MCP Protocol Specification](https://modelcontextprotocol.io) - Model Context Protocol details
- [Prowler API Documentation](https://api.prowler.com/api/v1/docs) - API reference
- [Prowler Hub API](https://hub.prowler.com/api/docs) - Hub API reference
- [GitHub Repository](https://github.com/prowler-cloud/prowler) - Source code
@@ -63,6 +63,82 @@ Other Commands for Running Tests
Refer to the [pytest documentation](https://docs.pytest.org/en/7.1.x/getting-started.html) for more details.

</Note>

## AWS Service Dependency Table (CI Optimization)

To optimize CI pipeline execution time, the GitHub Actions workflow for AWS tests uses a **service dependency table** that determines which tests to run based on changed files. This ensures that when a service is modified, all dependent services are also tested.

### How It Works

The dependency table is defined in `.github/workflows/sdk-tests.yml` within the "Resolve AWS services under test" step. When files in a specific AWS service are changed:

1. Tests for the changed service are run
2. Tests for all services that **depend on** the changed service are also run

For example, if you modify the `ec2` service, tests will also run for `dlm`, `dms`, `elbv2`, `emr`, `inspector2`, `rds`, `redshift`, `route53`, `shield`, `ssm`, and `workspaces` because these services use the EC2 client.
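
Conceptually, the resolution step expands the set of changed services through this mapping. The sketch below is illustrative and assumes the expansion follows dependency chains transitively (the actual workflow step may expand only one level); the `dependents` entries are abbreviated from the table in the next section.

```python
# Hypothetical sketch of the service resolution; the real logic lives in
# .github/workflows/sdk-tests.yml and may differ in detail.
dependents = {
    "ec2": ["dlm", "dms", "elbv2", "emr", "inspector2", "rds",
            "redshift", "route53", "shield", "ssm"],
    "ssm": ["ec2"],
    # ... remaining entries from the dependency table ...
}


def services_under_test(changed: set[str]) -> set[str]:
    """Return the changed services plus every service that depends on them."""
    result: set[str] = set()
    queue = list(changed)
    while queue:
        service = queue.pop()
        if service in result:
            continue  # already scheduled; also breaks the ec2 <-> ssm cycle
        result.add(service)
        queue.extend(dependents.get(service, []))
    return result


print(sorted(services_under_test({"ec2"})))  # ec2 plus all of its dependents
```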

### Current Dependency Table

The table maps a service (key) to the list of services that depend on it (values):

| Service | Dependent Services |
|---------|-------------------|
| `acm` | `elb` |
| `autoscaling` | `dynamodb` |
| `awslambda` | `ec2`, `inspector2` |
| `backup` | `dynamodb`, `ec2`, `rds` |
| `cloudfront` | `shield` |
| `cloudtrail` | `awslambda`, `cloudwatch` |
| `cloudwatch` | `bedrock` |
| `ec2` | `dlm`, `dms`, `elbv2`, `emr`, `inspector2`, `rds`, `redshift`, `route53`, `shield`, `ssm` |
| `ecr` | `inspector2` |
| `elb` | `shield` |
| `elbv2` | `shield` |
| `globalaccelerator` | `shield` |
| `iam` | `bedrock`, `cloudtrail`, `cloudwatch`, `codebuild` |
| `kafka` | `firehose` |
| `kinesis` | `firehose` |
| `kms` | `kafka` |
| `organizations` | `iam`, `servicecatalog` |
| `route53` | `shield` |
| `s3` | `bedrock`, `cloudfront`, `cloudtrail`, `macie` |
| `ssm` | `ec2` |
| `vpc` | `awslambda`, `ec2`, `efs`, `elasticache`, `neptune`, `networkfirewall`, `rds`, `redshift`, `workspaces` |
| `waf` | `elbv2` |
| `wafv2` | `cognito`, `elbv2` |

### When to Update the Table

You must update the dependency table when:

1. **A new check or service uses another service's client**: If your check imports a client from another service (e.g., `from prowler.providers.aws.services.ec2.ec2_client import ec2_client` in a non-ec2 check), add your service to the dependent services list of that client's service.

2. **A service relationship changes**: If you remove or add a service client dependency in an existing check, update the table accordingly.

### How to Update the Table

1. Open `.github/workflows/sdk-tests.yml`
2. Find the `dependents` dictionary in the "Resolve AWS services under test" step
3. Add or modify entries as needed
4. **Update this documentation page** (`docs/developer-guide/unit-testing.mdx`) to reflect the changes in the [Current Dependency Table](#current-dependency-table) section above

```python
dependents = {
    # ... existing entries ...
    "service_being_used": ["service_that_uses_it"],
}
```

**Example**: If you create a new check in the `newservice` service that imports `ec2_client`, add `newservice` to the `ec2` entry:

```python
"ec2": ["dlm", "dms", "elbv2", "emr", "inspector2", "newservice", "rds", "redshift", "route53", "shield", "ssm"],
```

<Warning>
Failing to update this table when adding cross-service dependencies may result in CI tests passing even when related functionality is broken, as the dependent service tests won't be triggered.
</Warning>

## AWS Testing Approaches

For AWS provider, different testing approaches apply based on API coverage based on several criteria.
|
||||
|
||||
@@ -19,9 +19,7 @@
"groups": [
{
"group": "Welcome",
"pages": [
"introduction"
]
"pages": ["introduction"]
},
{
"group": "Prowler Cloud",
@@ -51,9 +49,7 @@
},
{
"group": "Prowler Lighthouse AI",
"pages": [
"user-guide/tutorials/prowler-app-lighthouse"
]
"pages": ["getting-started/products/prowler-lighthouse-ai"]
},
{
"group": "Prowler MCP Server",
@@ -109,7 +105,13 @@
"user-guide/tutorials/prowler-app-jira-integration"
]
},
"user-guide/tutorials/prowler-app-lighthouse",
{
"group": "Lighthouse AI",
"pages": [
"user-guide/tutorials/prowler-app-lighthouse",
"user-guide/tutorials/prowler-app-lighthouse-multi-llm"
]
},
"user-guide/tutorials/prowler-cloud-public-ips",
{
"group": "Tutorials",
@@ -147,9 +149,7 @@
"user-guide/cli/tutorials/quick-inventory",
{
"group": "Tutorials",
"pages": [
"user-guide/cli/tutorials/parallel-execution"
]
"pages": ["user-guide/cli/tutorials/parallel-execution"]
}
]
},
@@ -192,6 +192,13 @@
"user-guide/providers/gcp/retry-configuration"
]
},
{
"group": "Alibaba Cloud",
"pages": [
"user-guide/providers/alibabacloud/getting-started-alibabacloud",
"user-guide/providers/alibabacloud/authentication"
]
},
{
"group": "Kubernetes",
"pages": [
@@ -230,9 +237,7 @@
},
{
"group": "LLM",
"pages": [
"user-guide/providers/llm/getting-started-llm"
]
"pages": ["user-guide/providers/llm/getting-started-llm"]
},
{
"group": "Oracle Cloud Infrastructure",
@@ -245,9 +250,7 @@
},
{
"group": "Compliance",
"pages": [
"user-guide/compliance/tutorials/threatscore"
]
"pages": ["user-guide/compliance/tutorials/threatscore"]
}
]
},
@@ -264,7 +267,8 @@
"developer-guide/outputs",
"developer-guide/integrations",
"developer-guide/security-compliance-framework",
"developer-guide/lighthouse"
"developer-guide/lighthouse",
"developer-guide/mcp-server"
]
},
{
@@ -300,21 +304,15 @@
},
{
"tab": "Security",
"pages": [
"security"
]
"pages": ["security"]
},
{
"tab": "Contact Us",
"pages": [
"contact"
]
"pages": ["contact"]
},
{
"tab": "Troubleshooting",
"pages": [
"troubleshooting"
]
"pages": ["troubleshooting"]
},
{
"tab": "About Us",
@@ -10,7 +10,7 @@ Complete reference guide for all tools available in the Prowler MCP Server. Tool
|----------|------------|------------------------|
| Prowler Hub | 10 tools | No |
| Prowler Documentation | 2 tools | No |
| Prowler Cloud/App | 28 tools | Yes |
| Prowler Cloud/App | 22 tools | Yes |

## Tool Naming Convention

@@ -20,6 +20,66 @@ All tools follow a consistent naming pattern with prefixes:
- `prowler_docs_*` - Prowler documentation search and retrieval
- `prowler_app_*` - Prowler Cloud and App (Self-Managed) management tools

## Prowler Cloud/App Tools

Manage Prowler Cloud or Prowler App (Self-Managed) features. **Requires authentication.**

<Note>
These tools require a valid API key. See the [Configuration Guide](/getting-started/basic-usage/prowler-mcp) for authentication setup.
</Note>

### Findings Management

Tools for searching, viewing, and analyzing security findings across all cloud providers.

- **`prowler_app_search_security_findings`** - Search and filter security findings with advanced filtering options (severity, status, provider, region, service, check ID, date range, muted status)
- **`prowler_app_get_finding_details`** - Get comprehensive details about a specific finding including remediation guidance, check metadata, and resource relationships
- **`prowler_app_get_findings_overview`** - Get aggregate statistics and trends about security findings as a markdown report

### Provider Management

Tools for managing cloud provider connections in Prowler.

- **`prowler_app_search_providers`** - Search and view configured providers with their connection status
- **`prowler_app_connect_provider`** - Register and connect a provider with credentials for security scanning
- **`prowler_app_delete_provider`** - Permanently remove a provider from Prowler

### Scan Management

Tools for managing and monitoring security scans.

- **`prowler_app_list_scans`** - List and filter security scans across all providers
- **`prowler_app_get_scan`** - Get comprehensive details about a specific scan (progress, duration, resource counts)
- **`prowler_app_trigger_scan`** - Trigger a manual security scan for a provider
- **`prowler_app_schedule_daily_scan`** - Schedule automated daily scans for continuous monitoring
- **`prowler_app_update_scan`** - Update scan name for better organization

### Resources Management

Tools for searching, viewing, and analyzing cloud resources discovered by Prowler.

- **`prowler_app_list_resources`** - List and filter cloud resources with advanced filtering options (provider, region, service, resource type, tags)
- **`prowler_app_get_resource`** - Get comprehensive details about a specific resource including configuration, metadata, and finding relationships
- **`prowler_app_get_resources_overview`** - Get aggregate statistics about cloud resources as a markdown report

### Muting Management

Tools for managing finding muting, including pattern-based bulk muting (mutelist) and finding-specific mute rules.

#### Mutelist (Pattern-Based Muting)

- **`prowler_app_get_mutelist`** - Retrieve the current mutelist configuration for the tenant
- **`prowler_app_set_mutelist`** - Create or update the mutelist configuration for pattern-based bulk muting
- **`prowler_app_delete_mutelist`** - Remove the mutelist configuration from the tenant

#### Mute Rules (Finding-Specific Muting)

- **`prowler_app_list_mute_rules`** - Search and filter mute rules with pagination support
- **`prowler_app_get_mute_rule`** - Retrieve comprehensive details about a specific mute rule
- **`prowler_app_create_mute_rule`** - Create a new mute rule to mute specific findings with documentation and audit trail
- **`prowler_app_update_mute_rule`** - Update a mute rule's name, reason, or enabled status
- **`prowler_app_delete_mute_rule`** - Delete a mute rule from the system

## Prowler Hub Tools

Access Prowler's security check catalog and compliance frameworks. **No authentication required.**

@@ -53,60 +113,6 @@ Search and access official Prowler documentation. **No authentication required.*
- **`prowler_docs_search`** - Search the official Prowler documentation using full-text search
- **`prowler_docs_get_document`** - Retrieve the full markdown content of a specific documentation file

## Prowler Cloud/App Tools

Manage Prowler Cloud or Prowler App (Self-Managed) features. **Requires authentication.**

<Note>
These tools require a valid API key. See the [Configuration Guide](/getting-started/basic-usage/prowler-mcp) for authentication setup.
</Note>

### Findings Management

- **`prowler_app_list_findings`** - List security findings with advanced filtering
- **`prowler_app_get_finding`** - Get detailed information about a specific finding
- **`prowler_app_get_latest_findings`** - Retrieve latest findings from the most recent scans
- **`prowler_app_get_findings_metadata`** - Get unique metadata values from filtered findings
- **`prowler_app_get_latest_findings_metadata`** - Get metadata from latest findings across all providers

### Provider Management

- **`prowler_app_list_providers`** - List all providers with filtering options
- **`prowler_app_create_provider`** - Create a new provider in the current tenant
- **`prowler_app_get_provider`** - Get detailed information about a specific provider
- **`prowler_app_update_provider`** - Update provider details (alias, etc.)
- **`prowler_app_delete_provider`** - Delete a specific provider
- **`prowler_app_test_provider_connection`** - Test provider connection status

### Provider Secrets Management

- **`prowler_app_list_provider_secrets`** - List all provider secrets with filtering
- **`prowler_app_add_provider_secret`** - Add or update credentials for a provider
- **`prowler_app_get_provider_secret`** - Get detailed information about a provider secret
- **`prowler_app_update_provider_secret`** - Update provider secret details
- **`prowler_app_delete_provider_secret`** - Delete a provider secret

### Scan Management

- **`prowler_app_list_scans`** - List all scans with filtering options
- **`prowler_app_create_scan`** - Trigger a manual scan for a specific provider
- **`prowler_app_get_scan`** - Get detailed information about a specific scan
- **`prowler_app_update_scan`** - Update scan details
- **`prowler_app_get_scan_compliance_report`** - Download compliance report as CSV
- **`prowler_app_get_scan_report`** - Download ZIP file containing complete scan report

### Schedule Management

- **`prowler_app_schedules_daily_scan`** - Create a daily scheduled scan for a provider

### Processor Management

- **`prowler_app_processors_list`** - List all processors with filtering
- **`prowler_app_processors_create`** - Create a new processor (currently only mute lists supported)
- **`prowler_app_processors_retrieve`** - Get processor details by ID
- **`prowler_app_processors_partial_update`** - Update processor configuration
- **`prowler_app_processors_destroy`** - Delete a processor

## Usage Tips

- Use natural language to interact with the tools through your AI assistant

@@ -139,7 +139,7 @@ STDIO mode is only available when running the MCP server locally.
"args": ["/absolute/path/to/prowler/mcp_server/"],
"env": {
"PROWLER_APP_API_KEY": "<your-api-key-here>",
"PROWLER_API_BASE_URL": "https://api.prowler.com"
"API_BASE_URL": "https://api.prowler.com/api/v1"
}
}
}
@@ -147,7 +147,7 @@ STDIO mode is only available when running the MCP server locally.
```

<Note>
Replace `/absolute/path/to/prowler/mcp_server/` with the actual path. The `PROWLER_API_BASE_URL` is optional and defaults to Prowler Cloud API.
Replace `/absolute/path/to/prowler/mcp_server/` with the actual path. The `API_BASE_URL` is optional and defaults to the Prowler Cloud API.
</Note>

</Tab>
@@ -167,7 +167,7 @@ STDIO mode is only available when running the MCP server locally.
"--env",
"PROWLER_APP_API_KEY=<your-api-key-here>",
"--env",
"PROWLER_API_BASE_URL=https://api.prowler.com",
"API_BASE_URL=https://api.prowler.com/api/v1",
"prowlercloud/prowler-mcp"
]
}
@@ -176,7 +176,7 @@ STDIO mode is only available when running the MCP server locally.
```

<Note>
The `PROWLER_API_BASE_URL` is optional and defaults to Prowler Cloud API.
The `API_BASE_URL` is optional and defaults to the Prowler Cloud API.
</Note>

</Tab>

@@ -4,12 +4,12 @@ title: "Installation"

### Installation

Prowler App supports multiple installation methods based on your environment.
Prowler App offers flexible installation methods tailored to various environments.

Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detailed usage instructions.

<Warning>
Prowler configuration is based in `.env` files. Every version of Prowler can have differences on that file, so, please, use the file that corresponds with that version or repository branch or tag.
Prowler configuration is based on `.env` files. Each Prowler version may ship a different `.env` file, so use the file that corresponds to the version, branch, or tag you are running.
</Warning>

<Tabs>
@@ -26,8 +26,6 @@ Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detai
curl -sLO "https://raw.githubusercontent.com/prowler-cloud/prowler/refs/tags/${VERSION}/.env"
docker compose up -d
```

> Containers are built for `linux/amd64`. If your workstation's architecture is different, please set `DOCKER_DEFAULT_PLATFORM=linux/amd64` in your environment or use the `--platform linux/amd64` flag in the docker command.
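
For example, on a non-amd64 workstation (a sketch; it assumes the Docker Compose flow above):

```bash
# Set the default platform for this shell session, then start the stack
export DOCKER_DEFAULT_PLATFORM=linux/amd64
docker compose up -d
```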
</Tab>
<Tab title="GitHub">
_Requirements_:
@@ -106,11 +104,13 @@ Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detai
</Tab>
</Tabs>

### Update Prowler App
### Updating Prowler App

Upgrade Prowler App installation using one of two options:

#### Option 1: Update Environment File
#### Option 1: Updating the Environment File

To update the environment file:

Edit the `.env` file and change version values:

@@ -119,7 +119,7 @@ PROWLER_UI_VERSION="5.9.0"
PROWLER_API_VERSION="5.9.0"
```

#### Option 2: Use Docker Compose Pull
#### Option 2: Using Docker Compose Pull

```bash
docker compose pull --policy always
@@ -133,7 +133,7 @@ The `--policy always` flag ensures that Docker pulls the latest images even if t
Everything is preserved, nothing will be deleted after the update.
</Note>

### Troubleshooting
### Troubleshooting Installation Issues

If containers don't start, check logs for errors:

@@ -145,16 +145,16 @@ docker compose logs
docker images | grep prowler
```

If you encounter issues, you can rollback to the previous version by changing the `.env` file back to your previous version and running:
If issues are encountered, roll back to the previous version by changing the `.env` file back to the previous version and running:

```bash
docker compose pull
docker compose up -d
```

### Container versions
### Container Versions

The available versions of Prowler CLI are the following:
The available versions of Prowler App are the following:

- `latest`: in sync with `master` branch (please note that it is not a stable version)
- `v4-latest`: in sync with `v4` branch (please note that it is not a stable version)

@@ -4,7 +4,7 @@ title: 'Installation'
## Installation

Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/). Install it as a Python package with `Python >= 3.9, <= 3.12`:
To install Prowler as a Python package, use `Python >= 3.9, <= 3.12`. Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/):

<Tabs>
<Tab title="pipx">
@@ -41,7 +41,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
prowler -v
```

Upgrade Prowler to the latest version:
To upgrade Prowler to the latest version:

``` bash
pip install --upgrade prowler
@@ -54,8 +54,6 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
* In the command below, change `-v` to your local directory path in order to access the reports.
* AWS, GCP, Azure and/or Kubernetes credentials

> Containers are built for `linux/amd64`. If your workstation's architecture is different, please set `DOCKER_DEFAULT_PLATFORM=linux/amd64` in your environment or use the `--platform linux/amd64` flag in the docker command.

_Commands_:

``` bash
@@ -75,7 +73,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).

_Commands_:

```
```bash
git clone https://github.com/prowler-cloud/prowler
cd prowler
poetry install
@@ -94,7 +92,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).

_Commands_:

```
```bash
python3 -m pip install --user pipx
python3 -m pipx ensurepath
pipx install prowler
@@ -104,7 +102,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
<Tab title="Ubuntu">
_Requirements_:

* `Ubuntu 23.04` or above, if you are using an older version of Ubuntu check [pipx installation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#__tabbed_1_1) and ensure you have `Python >= 3.9, <= 3.12`.
* `Ubuntu 23.04` or above. For older Ubuntu versions, check [pipx installation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#__tabbed_1_1) and ensure `Python >= 3.9, <= 3.12` is installed.
* `Python >= 3.9, <= 3.12`
* AWS, GCP, Azure and/or Kubernetes credentials

@@ -121,7 +119,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
<Tab title="Brew">
_Requirements_:

* `Brew` installed in your Mac or Linux
* `Brew` installed on Mac or Linux
* AWS, GCP, Azure and/or Kubernetes credentials

_Commands_:
@@ -171,7 +169,8 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
```
</Tab>
</Tabs>
## Container versions

## Container Versions

The available versions of Prowler CLI are the following:

@@ -52,7 +52,7 @@ Choose one of the following installation methods:
```bash
docker run --rm -i \
  -e PROWLER_APP_API_KEY="pk_your_api_key" \
  -e PROWLER_API_BASE_URL="https://api.prowler.com" \
  -e API_BASE_URL="https://api.prowler.com/api/v1" \
  prowlercloud/prowler-mcp
```

@@ -181,19 +181,19 @@ Configure the server using environment variables:
| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `PROWLER_APP_API_KEY` | Prowler API key | Only for STDIO mode | - |
| `PROWLER_API_BASE_URL` | Custom Prowler API endpoint | No | `https://api.prowler.com` |
| `API_BASE_URL` | Custom Prowler API endpoint | No | `https://api.prowler.com/api/v1` |
| `PROWLER_MCP_TRANSPORT_MODE` | Default transport mode (overwritten by `--transport` argument) | No | `stdio` |

<CodeGroup>
```bash macOS/Linux
export PROWLER_APP_API_KEY="pk_your_api_key_here"
export PROWLER_API_BASE_URL="https://api.prowler.com"
export API_BASE_URL="https://api.prowler.com/api/v1"
export PROWLER_MCP_TRANSPORT_MODE="http"
```

```bash Windows PowerShell
$env:PROWLER_APP_API_KEY="pk_your_api_key_here"
$env:PROWLER_API_BASE_URL="https://api.prowler.com"
$env:API_BASE_URL="https://api.prowler.com/api/v1"
$env:PROWLER_MCP_TRANSPORT_MODE="http"
```
</CodeGroup>
@@ -208,7 +208,7 @@ For convenience, create a `.env` file in the `mcp_server` directory:

```bash .env
PROWLER_APP_API_KEY=pk_your_api_key_here
PROWLER_API_BASE_URL=https://api.prowler.com
API_BASE_URL=https://api.prowler.com/api/v1
PROWLER_MCP_TRANSPORT_MODE=stdio
```

@@ -6,7 +6,7 @@ title: "Overview"

**Why this matters**: Every engineer has asked, “What does this check actually do?” Prowler Hub answers that question in one place, lets you pin to a specific version, and pulls definitions into your own tools or dashboards.




<Card title="Go to Prowler Hub" href="https://hub.prowler.com" />

@@ -14,4 +14,4 @@ Prowler Hub also provides a fully documented public API that you can integrate i

📚 Explore the API docs at: https://hub.prowler.com/api/docs

Whether you’re customizing policies, managing compliance, or enhancing visibility, Prowler Hub is built to support your security operations.

@@ -0,0 +1,180 @@
---
title: 'Overview'
---

import { VersionBadge } from "/snippets/version-badge.mdx"

<VersionBadge version="5.8.0" />

Prowler Lighthouse AI is a Cloud Security Analyst chatbot that helps you understand, prioritize, and remediate security findings in your cloud environments. It's designed to provide security expertise for teams without dedicated resources, acting as your 24/7 virtual cloud security analyst.

<img src="/images/prowler-app/lighthouse-intro.png" alt="Prowler Lighthouse" />

<Card title="Set Up Lighthouse AI" icon="rocket" href="/user-guide/tutorials/prowler-app-lighthouse#set-up">
  Learn how to configure Lighthouse AI with your preferred LLM provider
</Card>

## Capabilities

Prowler Lighthouse AI is designed to be your AI security team member, with capabilities including:

### Natural Language Querying

Ask questions in plain English about your security findings. Examples:

- "What are my highest risk findings?"
- "Show me all S3 buckets with public access."
- "What security issues were found in my production accounts?"

<img src="/images/prowler-app/lighthouse-feature1.png" alt="Natural language querying" />

### Detailed Remediation Guidance

Get tailored step-by-step instructions for fixing security issues:

- Clear explanations of the problem and its impact
- Commands or console steps to implement fixes
- Alternative approaches with different solutions

<img src="/images/prowler-app/lighthouse-feature2.png" alt="Detailed Remediation" />

### Enhanced Context and Analysis

Lighthouse AI can provide additional context to help you understand the findings:

- Explain security concepts related to findings in simple terms
- Provide risk assessments based on your environment and context
- Connect related findings to show broader security patterns

<img src="/images/prowler-app/lighthouse-config.png" alt="Business Context" />

<img src="/images/prowler-app/lighthouse-feature3.png" alt="Contextual Responses" />

## Important Notes

Prowler Lighthouse AI is powerful, but there are limitations:

- **Continuous improvement**: Please report any issues, as the feature may make mistakes or encounter errors, despite extensive testing.
- **Access limitations**: Lighthouse AI can only access data the logged-in user can view. If you can't see certain information, Lighthouse AI can't see it either.
- **NextJS session dependence**: If your Prowler application session expires or you log out, Lighthouse AI will error out. Refresh and log back in to continue.
- **Response quality**: Response quality depends on the selected LLM provider and model. Choose models with strong tool-calling capabilities for best results. We recommend OpenAI's `gpt-5` model.

### Getting Help

If you encounter issues with Prowler Lighthouse AI or have suggestions for improvements, please [reach out through our Slack channel](https://goto.prowler.com/slack).

### What Data Is Shared to LLM Providers?

The following API endpoints are accessible to Prowler Lighthouse AI. Data from these endpoints may be shared with the configured LLM provider, depending on the scope of the user's query:

#### Accessible API Endpoints

**User Management:**

- List all users - `/api/v1/users`
- Retrieve the current user's information - `/api/v1/users/me`

**Provider Management:**

- List all providers - `/api/v1/providers`
- Retrieve data from a provider - `/api/v1/providers/{id}`

**Scan Management:**

- List all scans - `/api/v1/scans`
- Retrieve data from a specific scan - `/api/v1/scans/{id}`

**Resource Management:**

- List all resources - `/api/v1/resources`
- Retrieve data for a resource - `/api/v1/resources/{id}`

**Findings Management:**

- List all findings - `/api/v1/findings`
- Retrieve data from a specific finding - `/api/v1/findings/{id}`
- Retrieve metadata values from findings - `/api/v1/findings/metadata`

**Overview Data:**

- Get aggregated findings data - `/api/v1/overviews/findings`
- Get findings data by severity - `/api/v1/overviews/findings_severity`
- Get aggregated provider data - `/api/v1/overviews/providers`
- Get findings data by service - `/api/v1/overviews/services`

**Compliance Management:**

- List compliance overviews (optionally filter by scan) - `/api/v1/compliance-overviews`
- Retrieve data from a specific compliance overview - `/api/v1/compliance-overviews/{id}`
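
For illustration, every Lighthouse AI data access is a plain read-only GET against one of the endpoints above. A hypothetical request (the header name and token variable are assumptions, not confirmed API details):

```bash
# Sketch: list findings the way an agent-backed query might,
# using only a GET request against the Prowler API.
curl -s -H "Authorization: Bearer $PROWLER_API_TOKEN" \
  "https://api.prowler.com/api/v1/findings"
```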

#### Excluded API Endpoints

Not all Prowler API endpoints are integrated with Lighthouse AI. They are intentionally excluded for the following reasons:

- OpenAI and other LLM providers shouldn't have access to sensitive data (such as provider secrets and other sensitive configuration)
- User queries don't need responses from those API endpoints (e.g., tasks, tenant details, downloading zip files)

**Excluded Endpoints:**

**User Management:**

- Retrieve a specific user's information - `/api/v1/users/{id}`
- List user memberships - `/api/v1/users/{user_pk}/memberships`
- Retrieve membership data from the user - `/api/v1/users/{user_pk}/memberships/{id}`

**Tenant Management:**

- List all tenants - `/api/v1/tenants`
- Retrieve data from a tenant - `/api/v1/tenants/{id}`
- List tenant memberships - `/api/v1/tenants/{tenant_pk}/memberships`
- List all invitations - `/api/v1/tenants/invitations`
- Retrieve data from tenant invitation - `/api/v1/tenants/invitations/{id}`

**Security and Configuration:**

- List all secrets - `/api/v1/providers/secrets`
- Retrieve data from a secret - `/api/v1/providers/secrets/{id}`
- List all provider groups - `/api/v1/provider-groups`
- Retrieve data from a provider group - `/api/v1/provider-groups/{id}`

**Reports and Tasks:**

- Download zip report - `/api/v1/scans/{v1}/report`
- List all tasks - `/api/v1/tasks`
- Retrieve data from a specific task - `/api/v1/tasks/{id}`

**Lighthouse AI Configuration:**

- List LLM providers - `/api/v1/lighthouse/providers`
- Retrieve LLM provider - `/api/v1/lighthouse/providers/{id}`
- List available models - `/api/v1/lighthouse/models`
- Retrieve tenant configuration - `/api/v1/lighthouse/configuration`

<Note>
Agents can only call GET endpoints; other HTTP methods are not available to them.
</Note>

## FAQs

**1. Which LLM providers are supported?**

Lighthouse AI supports three providers:

- **OpenAI** - GPT models (GPT-5, GPT-4o, etc.)
- **Amazon Bedrock** - Claude, Llama, Titan, and other models via AWS
- **OpenAI Compatible** - Custom endpoints like OpenRouter, Ollama, or any OpenAI-compatible service

For detailed configuration instructions, see [Using Multiple LLM Providers with Lighthouse](/user-guide/tutorials/prowler-app-lighthouse-multi-llm).

**2. Why a multi-agent supervisor model?**

Context windows are limited. While demo data fits inside the context window, querying real-world data often exceeds it. A multi-agent architecture is used so different agents fetch different sizes of data and respond with the minimum required data to the supervisor. This spreads context window usage across agents.

**3. Is my security data shared with LLM providers?**

Minimal data is shared to generate useful responses. Agents can access security findings and remediation details when needed. Provider secrets are protected by design and cannot be read. The LLM provider credentials configured with Lighthouse AI are only accessible to our NextJS server and are never sent to the LLM providers. Resource metadata (names, tags, account/project IDs, etc.) may be shared with the configured LLM provider based on query requirements.

**4. Can Lighthouse AI change my cloud environment?**

No. The agent doesn't have the tools to make changes, even if the configured cloud provider API keys contain permissions to modify resources.
@@ -19,12 +19,11 @@ The Prowler MCP Server provides three main integration points:
### 1. Prowler Cloud and Prowler App (Self-Managed)

Full access to Prowler Cloud platform and self-managed Prowler App for:
- **Provider Management**: Create, configure, and manage cloud providers (AWS, Azure, GCP, etc.).
- **Scan Orchestration**: Trigger on-demand scans and schedule recurring security assessments.
- **Findings Analysis**: Query, filter, and analyze security findings across all your cloud environments.
- **Compliance Reporting**: Generate compliance reports for various frameworks (CIS, PCI-DSS, HIPAA, etc.).
- **Secrets Management**: Securely manage provider credentials and connection details.
- **Processor Configuration**: Set up the [Prowler Mutelist](/user-guide/tutorials/prowler-app-mute-findings) to mute findings.
- **Findings Analysis**: Query, filter, and analyze security findings across all your cloud environments
- **Provider Management**: Create, configure, and manage your configured Prowler providers (AWS, Azure, GCP, etc.)
- **Scan Orchestration**: Trigger on-demand scans and schedule recurring security assessments
- **Resource Inventory**: Search and view detailed information about your audited resources
- **Muting Management**: Create and manage muting lists/rules to suppress non-relevant findings

### 2. Prowler Hub

@@ -49,7 +48,10 @@ The following diagram illustrates the Prowler MCP Server architecture and its in
<img className="block dark:hidden" src="/images/prowler_mcp_schema_light.png" alt="Prowler MCP Server Schema" />
<img className="hidden dark:block" src="/images/prowler_mcp_schema_dark.png" alt="Prowler MCP Server Schema" />

The architecture shows how AI assistants connect through the MCP protocol to access Prowler's three main components: Prowler Cloud/App for security operations, Prowler Hub for security knowledge, and Prowler Documentation for guidance and reference.
The architecture shows how AI assistants connect through the MCP protocol to access Prowler's three main components:
- Prowler Cloud/App for security operations
- Prowler Hub for security knowledge
- Prowler Documentation for guidance and reference

## Use Cases

@@ -57,12 +59,12 @@ The Prowler MCP Server enables powerful workflows through AI assistants:

**Security Operations**
- "Show me all critical findings from my AWS production accounts"
- "What is my compliance status for the PCI standards across all my AWS accounts according to the latest Prowler scan results?"
- "Register my new AWS account in Prowler and run an scheduled scan every day"
- "Register my new AWS account in Prowler and run a scheduled scan every day"
- "List all muted findings and detect which findings are muted without a sufficiently good reason relative to their severity"

**Security Research**
- "Explain what the S3 bucket public access check does"
- "Find all checks related to encryption at rest"
- "Explain what the S3 bucket public access Prowler check does"
- "Find all Prowler checks related to encryption at rest"
- "What is the latest version of the CIS that Prowler is covering per provider?"

**Documentation & Learning**

@@ -33,7 +33,7 @@ The supported providers right now are:
| [Github](/user-guide/providers/github/getting-started-github) | Official | UI, API, CLI |
| [Oracle Cloud](/user-guide/providers/oci/getting-started-oci) | Official | UI, API, CLI |
| [Infra as Code](/user-guide/providers/iac/getting-started-iac) | Official | UI, API, CLI |
| [MongoDB Atlas](/user-guide/providers/mongodbatlas/getting-started-mongodbatlas) | Official | CLI, API |
| [MongoDB Atlas](/user-guide/providers/mongodbatlas/getting-started-mongodbatlas) | Official | UI, API, CLI |
| [LLM](/user-guide/providers/llm/getting-started-llm) | Official | CLI |
| **NHN** | Unofficial | CLI |

@@ -24,6 +24,80 @@ We enforce [pre-commit](https://github.com/prowler-cloud/prowler/blob/master/.pr

Our container registries are continuously scanned for vulnerabilities, with findings automatically reported to our security team for assessment and remediation. This process evolves alongside our stack as we adopt new languages, frameworks, and technologies, ensuring our security practices remain comprehensive, proactive, and adaptable.

### Static Application Security Testing (SAST)

We employ multiple SAST tools across our codebase to identify security vulnerabilities, code quality issues, and potential bugs during development:

#### CodeQL Analysis
- **Scope**: UI (JavaScript/TypeScript), API (Python), and SDK (Python)
- **Frequency**: On every push and pull request, plus daily scheduled scans
- **Integration**: Results uploaded to GitHub Security tab via SARIF format
- **Purpose**: Identifies security vulnerabilities, coding errors, and potential exploits in source code

#### Python Security Scanners
- **Bandit**: Detects common security issues in Python code (SQL injection, hardcoded passwords, etc.)
  - Configured to ignore test files and report only high-severity issues
  - Runs on both SDK and API codebases
- **Pylint**: Static code analysis with security-focused checks
  - Integrated into pre-commit hooks and CI/CD pipelines
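
As an illustration of how such hooks are wired up, a pre-commit configuration along these lines would run Bandit on every commit (a sketch only; the repository pin and arguments are assumptions, not Prowler's actual `.pre-commit-config.yaml`):

```yaml
repos:
  - repo: https://github.com/PyCQA/bandit
    rev: 1.7.9
    hooks:
      - id: bandit
        # -lll reports only high-severity issues; test paths are excluded
        args: ["-lll", "--exclude", "tests"]
```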

#### Code Quality & Dead Code Detection
- **Vulture**: Identifies unused code that could indicate incomplete implementations or security gaps
- **Flake8**: Style guide enforcement with security-relevant checks
- **Shellcheck**: Security and correctness checks for shell scripts

### Software Composition Analysis (SCA)

We continuously monitor our dependencies for known vulnerabilities and ensure timely updates:

#### Dependency Vulnerability Scanning
- **Safety**: Scans Python dependencies against known vulnerability databases
  - Runs on every commit via pre-commit hooks
  - Integrated into CI/CD for SDK and API
  - Configured with selective ignores for tracked exceptions
- **Trivy**: Multi-purpose scanner for containers and dependencies
  - Scans all container images (UI, API, SDK, MCP Server)
  - Checks for vulnerabilities in OS packages and application dependencies
  - Reports findings to GitHub Security tab

#### Automated Dependency Updates
- **Dependabot**: Automated pull requests for dependency updates
  - **Python (pip)**: Monthly updates for SDK
  - **GitHub Actions**: Monthly updates for workflow dependencies
  - **Docker**: Monthly updates for base images
  - Temporarily paused for API and UI to maintain stability during active development
- **Security-first approach**: Even when paused, Dependabot automatically creates pull requests for security vulnerabilities, ensuring critical security patches are never delayed

### Container Security

All container images are scanned before deployment:

- **Trivy Vulnerability Scanning**:
  - Scans images for vulnerabilities and misconfigurations
  - Generates SARIF reports uploaded to GitHub Security tab
  - Creates PR comments with scan summaries
  - Configurable to fail builds on critical findings
  - Reports include CVE counts and remediation guidance
- **Hadolint**: Dockerfile linting to enforce best practices
  - Validates Dockerfile syntax and structure
  - Ensures secure image building practices

### Secrets Detection

We protect against accidental exposure of sensitive credentials:

- **TruffleHog**: Scans entire codebase and Git history for secrets
  - Runs on every push and pull request
  - Pre-commit hook prevents committing secrets
  - Detects high-entropy strings, API keys, tokens, and credentials
  - Configured to report verified and unknown findings

### Security Monitoring

- **GitHub Security Tab**: Centralized view of all security findings from CodeQL, Trivy, and other SARIF-compatible tools
- **Artifact Retention**: Security scan reports retained for post-deployment analysis
- **PR Comments**: Automated security feedback on pull requests for rapid remediation

## Reporting Vulnerabilities

At Prowler, we consider the security of our open source software and systems a top priority. But no matter how much effort we put into system security, there can still be vulnerabilities present.

@@ -0,0 +1,112 @@
---
title: 'Alibaba Cloud Authentication in Prowler'
---

Prowler requires Alibaba Cloud credentials to perform security checks. Authentication is supported via multiple methods, prioritized as follows:

1. **Credentials URI**
2. **OIDC Role Authentication**
3. **ECS RAM Role**
4. **RAM Role Assumption**
5. **STS Temporary Credentials**
6. **Permanent Access Keys**
7. **Default Credential Chain**

## Authentication Methods

### Credentials URI (Recommended for Centralized Services)

If `--credentials-uri` is provided (or the `ALIBABA_CLOUD_CREDENTIALS_URI` environment variable is set), Prowler will retrieve credentials from the specified external URI endpoint. The URI must return credentials in the standard JSON format.

```bash
export ALIBABA_CLOUD_CREDENTIALS_URI="http://localhost:8080/credentials"
prowler alibabacloud
```
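
For reference, the standard JSON format follows the shape of Alibaba Cloud's session-credential responses, along these lines (an illustrative sketch with placeholder values; confirm the exact contract against the Alibaba Cloud credentials SDK documentation):

```json
{
  "Code": "Success",
  "AccessKeyId": "STS.ExampleAccessKeyId",
  "AccessKeySecret": "ExampleAccessKeySecret",
  "SecurityToken": "ExampleSecurityToken",
  "Expiration": "2025-01-01T00:00:00Z"
}
```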

### OIDC Role Authentication (Recommended for ACK/Kubernetes)

If OIDC environment variables are set, Prowler will use OIDC authentication to assume the specified role. This is the most secure method for containerized applications running in ACK (Alibaba Container Service for Kubernetes) with RRSA enabled.

Required environment variables:

- `ALIBABA_CLOUD_ROLE_ARN`
- `ALIBABA_CLOUD_OIDC_PROVIDER_ARN`
- `ALIBABA_CLOUD_OIDC_TOKEN_FILE`

```bash
export ALIBABA_CLOUD_ROLE_ARN="acs:ram::123456789012:role/YourRole"
export ALIBABA_CLOUD_OIDC_PROVIDER_ARN="acs:ram::123456789012:oidc-provider/ack-rrsa-provider"
export ALIBABA_CLOUD_OIDC_TOKEN_FILE="/var/run/secrets/tokens/oidc-token"
prowler alibabacloud
```

### ECS RAM Role (Recommended for ECS Instances)

When running on an ECS instance with an attached RAM role, Prowler can obtain credentials from the ECS instance metadata service.

```bash
# Using CLI argument
prowler alibabacloud --ecs-ram-role RoleName

# Or using environment variable
export ALIBABA_CLOUD_ECS_METADATA="RoleName"
prowler alibabacloud
```

### RAM Role Assumption (Recommended for Cross-Account)

For cross-account access, use RAM role assumption. You must provide the initial credentials (access keys) and the target role ARN.

```bash
export ALIBABA_CLOUD_ACCESS_KEY_ID="your-access-key-id"
export ALIBABA_CLOUD_ACCESS_KEY_SECRET="your-access-key-secret"
export ALIBABA_CLOUD_ROLE_ARN="acs:ram::123456789012:role/ProwlerAuditRole"
prowler alibabacloud
```

### STS Temporary Credentials

If you already have temporary STS credentials, you can provide them via environment variables.

```bash
export ALIBABA_CLOUD_ACCESS_KEY_ID="your-sts-access-key-id"
export ALIBABA_CLOUD_ACCESS_KEY_SECRET="your-sts-access-key-secret"
export ALIBABA_CLOUD_SECURITY_TOKEN="your-sts-security-token"
prowler alibabacloud
```

### Permanent Access Keys

You can use standard permanent access keys via environment variables.

```bash
export ALIBABA_CLOUD_ACCESS_KEY_ID="your-access-key-id"
export ALIBABA_CLOUD_ACCESS_KEY_SECRET="your-access-key-secret"
prowler alibabacloud
```

## Required Permissions

The credentials used by Prowler should have only the permissions required to audit the resources. At a minimum, the following permissions are recommended:

- `ram:GetUser`
- `ram:ListUsers`
- `ram:GetPasswordPolicy`
- `ram:GetAccountSummary`
- `ram:ListVirtualMFADevices`
- `ram:ListGroups`
- `ram:ListPolicies`
- `ram:ListAccessKeys`
- `ram:GetLoginProfile`
- `ram:ListPoliciesForUser`
- `ram:ListGroupsForUser`
- `actiontrail:DescribeTrails`
- `oss:GetBucketLogging`
- `oss:GetBucketAcl`
- `rds:DescribeDBInstances`
- `rds:DescribeDBInstanceAttribute`
- `ecs:DescribeInstances`
- `vpc:DescribeVpcs`
- `sls:ListProject`
- `sls:ListAlerts`
- `sls:ListLogStores`
- `sls:GetLogStore`
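
A custom RAM policy granting this read-only access might look like the following sketch (Alibaba Cloud RAM policies use `"Version": "1"`; the action list mirrors the permissions above, and this is an illustration rather than an officially published Prowler policy):

```json
{
  "Version": "1",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "ram:GetUser", "ram:ListUsers", "ram:GetPasswordPolicy",
        "ram:GetAccountSummary", "ram:ListVirtualMFADevices", "ram:ListGroups",
        "ram:ListPolicies", "ram:ListAccessKeys", "ram:GetLoginProfile",
        "ram:ListPoliciesForUser", "ram:ListGroupsForUser",
        "actiontrail:DescribeTrails",
        "oss:GetBucketLogging", "oss:GetBucketAcl",
        "rds:DescribeDBInstances", "rds:DescribeDBInstanceAttribute",
        "ecs:DescribeInstances", "vpc:DescribeVpcs",
        "sls:ListProject", "sls:ListAlerts", "sls:ListLogStores", "sls:GetLogStore"
      ],
      "Resource": "*"
    }
  ]
}
```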
@@ -0,0 +1,132 @@
---
title: 'Getting Started With Alibaba Cloud on Prowler'
---

## Prowler CLI

### Configure Alibaba Cloud Credentials

Prowler requires Alibaba Cloud credentials to perform security checks. Authentication is available through the following methods (in order of priority):

1. **Credentials URI** (Recommended for centralized credential services)
2. **OIDC Role Authentication** (Recommended for ACK/Kubernetes)
3. **ECS RAM Role** (Recommended for ECS instances)
4. **RAM Role Assumption** (Recommended for cross-account access)
5. **STS Temporary Credentials**
6. **Permanent Access Keys**
7. **Default Credential Chain**

<Warning>
Prowler does not accept credentials through command-line arguments. Provide credentials through environment variables or the Alibaba Cloud credential chain.
</Warning>

#### Option 1: Environment Variables (Permanent Credentials)

```bash
export ALIBABA_CLOUD_ACCESS_KEY_ID="your-access-key-id"
export ALIBABA_CLOUD_ACCESS_KEY_SECRET="your-access-key-secret"
prowler alibabacloud
```

#### Option 2: Environment Variables (STS Temporary Credentials)

```bash
export ALIBABA_CLOUD_ACCESS_KEY_ID="your-sts-access-key-id"
export ALIBABA_CLOUD_ACCESS_KEY_SECRET="your-sts-access-key-secret"
export ALIBABA_CLOUD_SECURITY_TOKEN="your-sts-security-token"
prowler alibabacloud
```

#### Option 3: RAM Role Assumption (Environment Variables)

```bash
export ALIBABA_CLOUD_ACCESS_KEY_ID="your-access-key-id"
export ALIBABA_CLOUD_ACCESS_KEY_SECRET="your-access-key-secret"
export ALIBABA_CLOUD_ROLE_ARN="acs:ram::123456789012:role/ProwlerAuditRole"
export ALIBABA_CLOUD_ROLE_SESSION_NAME="ProwlerAssessmentSession" # Optional
prowler alibabacloud
```

#### Option 4: RAM Role Assumption (CLI + Environment Variables)

```bash
# Set credentials via environment variables
export ALIBABA_CLOUD_ACCESS_KEY_ID="your-access-key-id"
export ALIBABA_CLOUD_ACCESS_KEY_SECRET="your-access-key-secret"
# Specify role via CLI argument
prowler alibabacloud --role-arn acs:ram::123456789012:role/ProwlerAuditRole --role-session-name ProwlerAssessmentSession
```

#### Option 5: ECS Instance Metadata (ECS RAM Role)

```bash
# When running on an ECS instance with an attached RAM role
prowler alibabacloud --ecs-ram-role RoleName

# Or using environment variable
export ALIBABA_CLOUD_ECS_METADATA="RoleName"
prowler alibabacloud
```

#### Option 6: OIDC Role Authentication (for ACK/Kubernetes)

```bash
# For applications running in ACK (Alibaba Container Service for Kubernetes) with RRSA enabled
export ALIBABA_CLOUD_ROLE_ARN="acs:ram::123456789012:role/YourRole"
export ALIBABA_CLOUD_OIDC_PROVIDER_ARN="acs:ram::123456789012:oidc-provider/ack-rrsa-provider"
export ALIBABA_CLOUD_OIDC_TOKEN_FILE="/var/run/secrets/tokens/oidc-token"
export ALIBABA_CLOUD_ROLE_SESSION_NAME="ProwlerOIDCSession" # Optional
prowler alibabacloud

# Or using CLI argument
prowler alibabacloud --oidc-role-arn acs:ram::123456789012:role/YourRole
```

#### Option 7: Credentials URI (External Credential Service)

```bash
# Retrieve credentials from an external URI endpoint
export ALIBABA_CLOUD_CREDENTIALS_URI="http://localhost:8080/credentials"
prowler alibabacloud

# Or using CLI argument
prowler alibabacloud --credentials-uri http://localhost:8080/credentials
```

#### Option 8: Default Credential Chain

The SDK automatically checks credentials in the following order:

1. Environment variables (`ALIBABA_CLOUD_*` or `ALIYUN_*`)
2. OIDC authentication (if OIDC environment variables are set)
3. Configuration file (`~/.aliyun/config.json`)
4. ECS instance metadata (if running on ECS)
5. Credentials URI (if `ALIBABA_CLOUD_CREDENTIALS_URI` is set)

```bash
prowler alibabacloud
```

### Specify Regions

To run checks only in specific regions:

```bash
prowler alibabacloud --regions cn-hangzhou cn-shanghai
```

### Run Specific Checks

To run specific checks:

```bash
prowler alibabacloud --checks ram_no_root_access_key ram_user_mfa_enabled_console_access
```

### Run Compliance Framework

To run a specific compliance framework:

```bash
prowler alibabacloud --compliance cis_2.0_alibabacloud
```
@@ -49,8 +49,9 @@ This method grants permanent access and is the recommended setup for production




!!! info
    An **External ID** is required when assuming the *ProwlerScan* role to comply with AWS [confused deputy prevention](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html).
<Info>
An **External ID** is required when assuming the *ProwlerScan* role to prevent the [confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html).
</Info>

6. Acknowledge the IAM resource creation warning and proceed

@@ -37,7 +37,7 @@ title: 'Getting Started With AWS on Prowler'

6. Choose the preferred authentication method (next step)




### Step 3: Set Up AWS Authentication

@@ -76,7 +76,7 @@ For Google Cloud, first enter your `GCP Project ID` and then select the authenti

7. Click "Next", then "Launch Scan"




---

@@ -5,18 +5,26 @@ import { VersionBadge } from "/snippets/version-badge.mdx"

Prowler's Infrastructure as Code (IaC) provider enables scanning of local or remote infrastructure code for security and compliance issues using [Trivy](https://trivy.dev/). This provider supports a wide range of IaC frameworks, allowing assessment of code before deployment.

## Supported Scanners
## Supported IaC Formats

The IaC provider leverages [Trivy](https://trivy.dev/latest/docs/scanner/vulnerability/) to support multiple scanners, including:
Prowler IaC provider scans the following Infrastructure as Code configurations for misconfigurations and secrets:

- Vulnerability
- Misconfiguration
- Secret
- License
| Configuration Type | File Patterns |
|--------------------|----------------------------------------------|
| Kubernetes | `*.yml`, `*.yaml`, `*.json` |
| Docker | `Dockerfile`, `Containerfile` |
| Terraform | `*.tf`, `*.tf.json`, `*.tfvars` |
| Terraform Plan | `tfplan`, `*.tfplan`, `*.json` |
| CloudFormation | `*.yml`, `*.yaml`, `*.json` |
| Azure ARM Template | `*.json` |
| Helm | `*.yml`, `*.yaml`, `*.tpl`, `*.tar.gz`, etc. |
| YAML | `*.yaml`, `*.yml` |
| JSON | `*.json` |
| Ansible | `*.yml`, `*.yaml`, `*.json`, `*.ini`, without extension |

## How It Works

- The IaC provider scans local directories (or specified paths) for supported IaC files, or scans remote repositories.
- Prowler App leverages [Trivy](https://trivy.dev/docs/latest/guide/coverage/iac/#scanner) to scan local directories (or specified paths) for supported IaC files, or scans remote repositories.
- No cloud credentials or authentication are required for local scans.
- For remote repository scans, authentication can be provided via [git URL](https://git-scm.com/docs/git-clone#_git_urls), CLI flags or environment variables.
- Check the [IaC Authentication](/user-guide/providers/iac/authentication) page for more details.
@@ -27,6 +35,10 @@ The IaC provider leverages [Trivy](https://trivy.dev/latest/docs/scanner/vulnera

<VersionBadge version="5.14.0" />

### Supported Scanners

Scanner selection is not configurable in Prowler App. The default scanners, misconfig and secret, run automatically during each scan.

### Step 1: Access Prowler Cloud/App

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
@@ -63,6 +75,17 @@ The IaC provider leverages [Trivy](https://trivy.dev/latest/docs/scanner/vulnera

<VersionBadge version="5.8.0" />

### Supported Scanners

Prowler CLI supports the following scanners:

- [Vulnerability](https://trivy.dev/docs/latest/guide/scanner/vulnerability/)
- [Misconfiguration](https://trivy.dev/docs/latest/guide/scanner/misconfiguration/)
- [Secret](https://trivy.dev/docs/latest/guide/scanner/secret/)
- [License](https://trivy.dev/docs/latest/guide/scanner/license/)

By default, only the misconfiguration and secret scanners run during a scan. To specify which scanners to use, refer to the [Specify Scanners](#specify-scanners) section below.

### Usage

Use the `iac` argument to run Prowler with the IaC provider. Specify the directory or repository to scan, frameworks to include, and paths to exclude.
@@ -103,7 +126,7 @@ Authentication for private repositories can be provided using one of the followi

#### Specify Scanners

Scan only vulnerability and misconfiguration scanners:
To run only specific scanners, use the `--scanners` flag. For example, to scan only for vulnerabilities and misconfigurations:

```sh
prowler iac --scan-path ./my-iac-directory --scanners vuln misconfig

@@ -47,8 +47,43 @@ If you are adding an **EKS**, **GKE**, **AKS** or external cluster, follow these
kubectl create token prowler-sa -n prowler-ns --duration=0
```

- **Security Note:** The `--duration=0` option generates a non-expiring token, which may pose a security risk if not managed properly. Users should decide on an appropriate expiration time based on their security policies. If a limited-time token is preferred, set `--duration=<TIME>` (e.g., `--duration=24h`).
- **Important:** If the token expires, Prowler Cloud will no longer be able to authenticate with the cluster. In this case, you will need to generate a new token and **remove and re-add the provider in Prowler Cloud** with the updated `kubeconfig`.
- **Security Note:** The `--duration=0` option generates a non-expiring token, which may pose a security risk if not managed properly. Choose an appropriate expiration time based on security policies. For a limited-time token, set `--duration=<TIME>` (e.g., `--duration=24h`).
<Note>
**Important:** If the token expires, Prowler Cloud can no longer authenticate with the cluster. Generate a new token and **remove and re-add the provider in Prowler Cloud** with the updated `kubeconfig`.
</Note>
<Tip>
**Token Expiration Limits**

When the Kubernetes cluster has `--service-account-max-token-expiration` configured, any token requested with a duration exceeding the maximum allowed value (including `--duration=0`) is automatically reduced to the cluster's maximum token expiration time. As an alternative solution, create a legacy Secret manually. Although Kubernetes no longer creates these secrets automatically, manual creation and linking to a ServiceAccount is still supported. These tokens do not expire until the secret or ServiceAccount is deleted.

**Steps:**

1. Create a `secret-sa.yaml` file (or any preferred name) with the following content:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: prowler-token-long-lived
  namespace: prowler-ns
  annotations:
    kubernetes.io/service-account.name: "prowler-sa"
type: kubernetes.io/service-account-token
```

2. Apply the secret:

```console
kubectl apply -f secret-sa.yaml
```

3. Retrieve the token (which will be permanent):

```console
kubectl get secret prowler-token-long-lived -n prowler-ns -o jsonpath='{.data.token}' | base64 --decode
```
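
Optionally, verify that the retrieved token authenticates before updating the `kubeconfig`. This is a quick sanity-check sketch, assuming the current `kubeconfig` already points at the target cluster:

```console
# Hypothetical check: exercise the long-lived token against the API server
TOKEN=$(kubectl get secret prowler-token-long-lived -n prowler-ns -o jsonpath='{.data.token}' | base64 --decode)
kubectl --token="$TOKEN" auth can-i list pods --all-namespaces
```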
</Tip>

3. Update your `kubeconfig` to use the ServiceAccount token:
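A typical sequence for this step uses `kubectl config`; the context and user names below are illustrative, not prescribed by this document:

```console
# Add the ServiceAccount token as a user credential (token value from the previous step)
kubectl config set-credentials prowler-sa --token=<TOKEN>
# Point a context at the target cluster with that user and switch to it
kubectl config set-context prowler-context --cluster=<CLUSTER_NAME> --user=prowler-sa
kubectl config use-context prowler-context
```
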
@@ -2,25 +2,38 @@
title: "Microsoft 365 Authentication in Prowler"
---

Prowler for Microsoft 365 supports multiple authentication types. Authentication methods vary between Prowler App and Prowler CLI:
Prowler for Microsoft 365 supports multiple authentication types across Prowler Cloud and Prowler CLI.

**Prowler App:**
## Navigation
- [Common Setup](#common-setup)
- [Prowler Cloud Authentication](#prowler-cloud-authentication)
- [Prowler CLI Authentication](#prowler-cli-authentication)
- [Supported PowerShell Versions](#supported-powershell-versions)
- [Required PowerShell Modules](#required-powershell-modules)

- [**Application Certificate Authentication**](#certificate-based-authentication) (**Recommended**)
- [**Application Client Secret Authentication**](#client-secret-authentication)
## Common Setup

### Authentication Methods Overview

Prowler Cloud uses app-only authentication. Prowler CLI supports the same app-only options and two delegated flows.

**Prowler Cloud:**

- [**Application Certificate Authentication**](#application-certificate-authentication-recommended) (**Recommended**)
- [**Application Client Secret Authentication**](#application-client-secret-authentication)

**Prowler CLI:**

- [**Application Certificate Authentication**](#certificate-based-authentication) (**Recommended**)
- [**Application Client Secret Authentication**](#client-secret-authentication)
- [**Application Certificate Authentication**](#application-certificate-authentication-recommended) (**Recommended**)
- [**Application Client Secret Authentication**](#application-client-secret-authentication)
- [**Azure CLI Authentication**](#azure-cli-authentication)
- [**Interactive Browser Authentication**](#interactive-browser-authentication)

## Required Permissions
### Required Permissions

To run the full Prowler provider, including PowerShell checks, two types of permission scopes must be set in **Microsoft Entra ID**.

### Application Permissions for App-Only Authentication
#### Application Permissions for App-Only Authentication

When using service principal authentication, add these **Application Permissions**:

@@ -44,6 +57,7 @@ When using service principal authentication, add these **Application Permissions

These permissions enable application-based authentication methods (client secret and certificate). Using certificate-based authentication is the recommended way to run the full M365 provider, including PowerShell checks.

</Note>

### Browser Authentication Permissions

When using browser authentication, permissions are delegated to the user, so the user must have the appropriate permissions rather than the application.
@@ -52,37 +66,38 @@ When using browser authentication, permissions are delegated to the user, so the

Browser and Azure CLI authentication methods limit scanning capabilities to checks that operate through Microsoft Graph API. Checks requiring PowerShell modules will not execute, as they need application-level permissions that cannot be delegated through browser authentication.

</Warning>

### Step-by-Step Permission Assignment

#### Create Application Registration

1. Access **Microsoft Entra ID**
1. Access **Microsoft Entra ID**.

![Search Microsoft Entra ID]()

2. Navigate to "Applications" > "App registrations"
2. Navigate to "Applications" > "App registrations".

![App Registration nav]()

3. Click "+ New registration", complete the form, and click "Register"
3. Click "+ New registration", complete the form, and click "Register".

![New Registration]()

4. Go to "Certificates & secrets" > "Client secrets" > "+ New client secret"
4. Go to "Certificates & secrets" > "Client secrets" > "+ New client secret".

![Client Secret nav]()

5. Fill in the required fields and click "Add", then copy the generated value (this will be `AZURE_CLIENT_SECRET`)
5. Fill in the required fields and click "Add", then copy the generated value (this will be `AZURE_CLIENT_SECRET`).

![Client Secret Value]()

#### Grant Microsoft Graph API Permissions

1. Go to App Registration > Select your Prowler App > click on "API permissions"
1. Open **API permissions** for the Prowler application registration.

![API Permission Page]()

2. Click "+ Add a permission" > "Microsoft Graph" > "Application permissions"
2. Click "+ Add a permission" > "Microsoft Graph" > "Application permissions".

![Add API Permission]()

@@ -97,38 +112,39 @@ Browser and Azure CLI authentication methods limit scanning capabilities to chec

![Search Domain.Read.All]()

4. Click "Add permissions", then click "Grant admin consent for `<your-tenant-name>`"
4. Click "Add permissions", then click "Grant admin consent for `<your-tenant-name>`".

<a id="grant-powershell-module-permissions-for-app-only-authentication"></a>
#### Grant PowerShell Module Permissions
1. **Add Exchange API:**

- Search and select "Office 365 Exchange Online" API in **APIs my organization uses**
- Search and select "Office 365 Exchange Online" API in **APIs my organization uses**.

![Search Office 365 Exchange Online]()

- Select "Exchange.ManageAsApp" permission and click "Add permissions"
- Select "Exchange.ManageAsApp" permission and click "Add permissions".

![Add Exchange.ManageAsApp permission]()

- Assign `Global Reader` role to the app: Go to `Roles and administrators` > click `here` for directory level assignment
- Assign `Global Reader` role to the app: Go to `Roles and administrators` > click `here` for directory level assignment.

![Go to Roles and Administrators]()

- Search for `Global Reader` and assign it to your application
- Search for `Global Reader` and assign it to the application.

![Search and Add Global Reader]()

2. **Add Teams API:**

- Search and select "Skype and Teams Tenant Admin API" in **APIs my organization uses**
- Search and select "Skype and Teams Tenant Admin API" in **APIs my organization uses**.

![Search Skype and Teams Tenant Admin API]()

- Select "application_access" permission and click "Add permissions"
- Select "application_access" permission and click "Add permissions".

![Add application_access permission]()

3. Click "Grant admin consent for `<your-tenant-name>`" to grant admin consent
3. Click "Grant admin consent for `<your-tenant-name>`" to grant admin consent.

![Grant Admin Consent]()

@@ -136,11 +152,13 @@ Final permissions should look like this:

![Final Permissions]()

Use the same application registration for both Prowler Cloud and Prowler CLI while switching authentication methods as needed.

<a id="client-secret-authentication"></a>
<a id="certificate-based-authentication"></a>
## Application Certificate Authentication (Recommended)

_Available for both Prowler App and Prowler CLI_
_Available for both Prowler Cloud and Prowler CLI_

**Authentication flag for CLI:** `--certificate-auth`

@@ -173,11 +191,11 @@ Guard `prowlerm365.key` and `prowlerm365.pfx`. Only upload the `.cer` file to th

</Warning>

If your organization uses a certificate authority, you can replace step 2 with a CSR workflow and import the signed certificate instead.
If an internal certificate authority is preferred, replace step 2 with a CSR workflow and import the signed certificate instead.

### Upload the Certificate to Microsoft Entra ID

1. Open **Microsoft Entra ID** > **App registrations** > your application.
1. Open **Microsoft Entra ID** > **App registrations** > the Prowler application.
2. Go to **Certificates & secrets** > **Certificates**.
3. Select **Upload certificate** and choose `prowlerm365.cer`.
4. Confirm the certificate appears with the expected expiration date.
@@ -189,45 +207,37 @@ base64 -i prowlerm365.pfx -o prowlerm365.pfx.b64
cat prowlerm365.pfx.b64 | tr -d '\n'
```

Copy the resulting single-line Base64 string (or the contents of `prowlerm365.pfx.b64`)—you will use it in the next step.
Copy the resulting single-line Base64 string (or the contents of `prowlerm365.pfx.b64`) for the next step.

### Provide the Certificate to Prowler

You can supply the private certificate to Prowler in two ways:
- **Prowler Cloud:** Paste the Base64-encoded PFX in the `certificate_content` field when configuring the Microsoft 365 provider in Prowler Cloud.
- **Prowler CLI:** Export credential variables or pass the local file path when running Prowler.

- **Environment variables (recommended for headless execution)**
```console
export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000"
export AZURE_TENANT_ID="11111111-1111-1111-1111-111111111111"
export M365_CERTIFICATE_CONTENT="$(base64 < prowlerm365.pfx | tr -d '\n')"
```

```console
export AZURE_CLIENT_ID="00000000-0000-0000-0000-000000000000"
export AZURE_TENANT_ID="11111111-1111-1111-1111-111111111111"
export M365_CERTIFICATE_CONTENT="$(base64 < prowlerm365.pfx | tr -d '\n')"
```
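
Before running a scan, a quick local check (a sketch, not a Prowler command) can confirm the exported value is a decodable single-line Base64 string:

```console
# Decode the exported value; line breaks or stray characters will surface as an error here
printf '%s' "$M365_CERTIFICATE_CONTENT" | base64 --decode > /dev/null && echo "certificate content decodes cleanly"
```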
Store the PFX securely and reference it when running the CLI:

The `M365_CERTIFICATE_CONTENT` variable must contain a single-line Base64 string. Remove any line breaks or spaces before exporting.
```console
python3 prowler-cli.py m365 --certificate-auth --certificate-path /secure/path/prowlerm365.pfx
```

- **Local file path**

Store the PFX securely and reference it when you run the CLI:

```console
python3 prowler-cli.py m365 --certificate-auth --certificate-path /secure/path/prowlerm365.pfx
```

The CLI still needs `AZURE_CLIENT_ID` and `AZURE_TENANT_ID` in the environment when you use `--certificate-path`.

For the **Prowler App**, paste the Base64-encoded PFX in the `certificate_content` field when you configure the provider secrets. The platform persists the encrypted certificate and supplies it during scans.
The CLI still needs `AZURE_CLIENT_ID` and `AZURE_TENANT_ID` in the environment when `--certificate-path` is used.

<Note>
Do not mix certificate authentication with a client secret. Provide either a certificate **or** a secret to the application registration and Prowler configuration.
</Note>

<a id="client-secret-authentication"></a>
<a id="service-principal-authentication"></a>
<a id="service-principal-authentication-recommended"></a>
## Application Client Secret Authentication

_Available for both Prowler App and Prowler CLI_
_Available for both Prowler Cloud and Prowler CLI_

**Authentication flag for CLI:** `--sp-env-auth`

@@ -239,35 +249,59 @@ export AZURE_CLIENT_SECRET="XXXXXXXXX"
export AZURE_TENANT_ID="XXXXXXXXX"
```

If these variables are not set or exported, execution using `--sp-env-auth` will fail.

Refer to the [Step-by-Step Permission Assignment](#step-by-step-permission-assignment) section below for setup instructions.

If the external API permissions described in the mentioned section above are not added only checks that work through MS Graph will be executed. This means that the full provider will not be executed.

This workflow is helpful for initial validation or temporary access. Plan to transition to certificate-based authentication to remove long-lived secrets and keep full provider coverage in unattended environments.
If these variables are not set or exported, execution using `--sp-env-auth` will fail. This workflow is helpful for initial validation or temporary access. Plan to transition to certificate-based authentication to remove long-lived secrets and keep full provider coverage in unattended environments.

<Note>
To scan every M365 check, ensure the required permissions are added to the application registration. Refer to the [PowerShell Module Permissions](#grant-powershell-module-permissions-for-app-only-authentication) section for more information.
</Note>

### Run Prowler with Certificate Authentication
If the external API permissions described above are not added, only checks that work through Microsoft Graph will be executed. This means that the full provider will not be executed.

After the variables or path are in place, run the Microsoft 365 provider as usual:
## Prowler Cloud Authentication

Use the shared permissions and credentials above, then complete the Microsoft 365 provider form in Prowler Cloud. The platform persists the encrypted credentials and supplies them during scans.

### Application Certificate Authentication (Recommended)

1. Select **Application Certificate Authentication**.
2. Enter the **tenant ID** and **application (client) ID**.
3. Paste the Base64-encoded certificate content.

This method keeps all Microsoft 365 checks available, including PowerShell-based checks.

### Application Client Secret Authentication

1. Select **Application Client Secret Authentication**.
2. Enter the **tenant ID** and **application (client) ID**.
3. Enter the **client secret**.

## Prowler CLI Authentication

### Certificate Authentication

**Authentication flag for CLI:** `--certificate-auth`

After credentials are exported, launch the Microsoft 365 provider with certificate authentication:

```console
python3 prowler-cli.py m365 --certificate-auth --init-modules --log-level ERROR
```

The command above initializes PowerShell modules if needed. You can combine other standard flags (for example, `--region M365USGovernment` or custom outputs) with `--certificate-auth`.
Prowler prints the certificate thumbprint during execution so the correct credential can be verified.

Prowler prints the certificate thumbprint during execution so you can confirm the correct credential is in use.
### Client Secret Authentication

**Authentication flag for CLI:** `--sp-env-auth`

After exporting the secret-based variables, run:

```console
python3 prowler-cli.py m365 --sp-env-auth --init-modules --log-level ERROR
```

<a id="azure-cli-authentication"></a>
## Azure CLI Authentication

_Available only for Prowler CLI_
### Azure CLI Authentication

**Authentication flag for CLI:** `--az-cli-auth`

@@ -279,7 +313,7 @@ az login --tenant <TENANT_ID>

az account set --tenant <TENANT_ID>
```

If you prefer to reuse the same service principal that powers certificate-based authentication, authenticate it through Azure CLI instead of exporting environment variables. Azure CLI expects the certificate in PEM format; convert the PFX produced earlier and sign in:
If reusing the same service principal that powers certificate-based authentication, authenticate it through Azure CLI instead of exporting environment variables. Azure CLI expects the certificate in PEM format; convert the PFX produced earlier and sign in:

```console
openssl pkcs12 -in prowlerm365.pfx -out prowlerm365.pem -nodes
```
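
A typical service-principal sign-in with a PEM certificate then looks like the following sketch; the IDs are illustrative, and the exact `az login` certificate option may vary by Azure CLI version:

```console
# az login historically accepts a PEM certificate path as the --password argument
az login --service-principal --username "00000000-0000-0000-0000-000000000000" --password prowlerm365.pem --tenant "11111111-1111-1111-1111-111111111111"
```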
@@ -297,11 +331,9 @@ python3 prowler-cli.py m365 --az-cli-auth

The Azure CLI identity must hold the same Microsoft Graph and external API permissions required for the full provider. Signing in with a user account limits the scan to delegated Microsoft Graph endpoints and skips PowerShell-based checks. Use a service principal with the necessary application permissions to keep complete coverage.

## Interactive Browser Authentication
### Interactive Browser Authentication

_Available only for Prowler CLI_

**Authentication flag:** `--browser-auth`
**Authentication flag for CLI:** `--browser-auth`

Authenticate against Azure using the default browser to start the scan. The `--tenant-id` flag is also required.
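
Following the command pattern used elsewhere in this document, a browser-based run (tenant ID illustrative) would look like:

```console
python3 prowler-cli.py m365 --browser-auth --tenant-id "11111111-1111-1111-1111-111111111111"
```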