Compare commits
144 Commits
| SHA1 |
|---|
| b050c917c6 |
| 67933d7d2d |
| 39280c8b9b |
| 4bcaf29b32 |
| e95be697ef |
| 6fa4565ebd |
| e426c29207 |
| 1d8d4f9325 |
| cad44a3510 |
| ee73e043f9 |
| 815797bc2b |
| 9cd249c561 |
| 00fe96a9f7 |
| 7c45ee1dbb |
| d19a23f829 |
| b071fffe57 |
| 422c55404b |
| 6c307385b0 |
| 13964ccb1c |
| 64ed526e31 |
| 2388a053ee |
| 7bb5354275 |
| 03cae9895b |
| e398b654d4 |
| d9e978af29 |
| 95d9e9a59f |
| 48f19d0f11 |
| 345033e58a |
| 15cb87534c |
| 5a85db103d |
| 2b86078d06 |
| b2abdbeb60 |
| dc852b4595 |
| 1250f582a5 |
| bb43e924ee |
| 0225627a98 |
| 3097513525 |
| 6af9ff4b4b |
| 06fa57a949 |
| dc9e91ac4e |
| 59f8dfe5ae |
| 7e0c5540bb |
| 79ec53bfc5 |
| ed5f6b3af6 |
| 6e135abaa0 |
| 65b054f798 |
| 28d5b2bb6c |
| c8d9f37e70 |
| 9d7b9c3327 |
| 127b8d8e56 |
| 4e9dd46a5e |
| 880345bebe |
| 1259713fd6 |
| 26088868a2 |
| e58574e2a4 |
| a07e599cfc |
| e020b3f74b |
| 8e7e376e4f |
| a63a3d3f68 |
| 10838de636 |
| 5ebf455e04 |
| 0d59441c5f |
| 3b05a1430e |
| ea953fb256 |
| 2198e461c9 |
| 75abd8f54d |
| 2f184a493b |
| e2e06a78f9 |
| de5aba6d4d |
| 6e7266eacf |
| 58bb66ff27 |
| 46bfe02ee8 |
| cee9a9a755 |
| b11ba9b5cb |
| 789fc84e31 |
| 6426558b18 |
| 9a1ddedd94 |
| 0ae400d2b1 |
| ced122ac0d |
| dc7d2d5aeb |
| b6ba6c6e31 |
| 30312bbc03 |
| 94fe87b4a2 |
| 219bc12365 |
| 66394ab061 |
| 7348ed2179 |
| 0b94f2929d |
| c23e2502f3 |
| c418c59b53 |
| 3dc4ab5b83 |
| 148a6f341b |
| b5df26452a |
| 45792686aa |
| ee31e82707 |
| 0ba1226d88 |
| 520cc31f73 |
| a5a882a975 |
| 84f9309a7c |
| cf3800dbbe |
| d43455971b |
| 1ea0dabf42 |
| 0f43789666 |
| 4f8e8ed935 |
| 518508d5fe |
| e715b9fbfb |
| 4167de39d2 |
| 531ba5c31b |
| 031548ca7e |
| 866edfb167 |
| d1380fc19d |
| 46666d29d3 |
| ce5f2cc5ed |
| c5c7b84afd |
| 3432c8108c |
| 7c42a61e17 |
| 575521c025 |
| eab6c23333 |
| 8ee9454dbc |
| b46a8fd0ba |
| 77ef4869e3 |
| 07ac96661e |
| 98f8ef1b4b |
| 5564b4c7ae |
| 427dab6810 |
| ee62ea384a |
| ca4c4c8381 |
| e246c0cfd7 |
| 74025b2b5e |
| ccb269caa2 |
| 0f22e754f2 |
| 7cb0ed052d |
| 1ec36d2285 |
| b0ec7daece |
| 1292abcf91 |
| 136366f4d7 |
| 203b46196b |
| beec37b0da |
| 73a277f27b |
| 822d201159 |
| 8e07ec8727 |
| 7c339ed9e4 |
| be0b8bba0d |
| 521afab4aa |
| 789221d901 |
@@ -14,6 +14,7 @@ UI_PORT=3000
AUTH_SECRET="N/c6mnaS5+SWq81+819OrzQZlmx1Vxtp/orjttJSmw8="
# Google Tag Manager ID
NEXT_PUBLIC_GOOGLE_TAG_MANAGER_ID=""

#### Code Review Configuration ####
# Enable Claude Code standards validation on pre-push hook
# Set to 'true' to validate changes against AGENTS.md standards via Claude Code
@@ -40,6 +41,26 @@ POSTGRES_DB=prowler_db
# POSTGRES_REPLICA_MAX_ATTEMPTS=3
# POSTGRES_REPLICA_RETRY_BASE_DELAY=0.5

# Neo4j auth
NEO4J_HOST=neo4j
NEO4J_PORT=7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=neo4j_password
# Neo4j settings
NEO4J_DBMS_MAX__DATABASES=1000000
NEO4J_SERVER_MEMORY_PAGECACHE_SIZE=1G
NEO4J_SERVER_MEMORY_HEAP_INITIAL__SIZE=1G
NEO4J_SERVER_MEMORY_HEAP_MAX__SIZE=1G
NEO4J_APOC_EXPORT_FILE_ENABLED=true
NEO4J_APOC_IMPORT_FILE_ENABLED=true
NEO4J_APOC_IMPORT_FILE_USE_NEO4J_CONFIG=true
NEO4J_PLUGINS=["apoc"]
NEO4J_DBMS_SECURITY_PROCEDURES_ALLOWLIST=apoc.*
NEO4J_DBMS_SECURITY_PROCEDURES_UNRESTRICTED=apoc.*
NEO4J_DBMS_CONNECTOR_BOLT_LISTEN_ADDRESS=0.0.0.0:7687
# Neo4j Prowler settings
NEO4J_INSERT_BATCH_SIZE=500

# Celery-Prowler task settings
TASK_RETRY_DELAY_SECONDS=0.1
TASK_RETRY_ATTEMPTS=5
@@ -107,6 +128,7 @@ DJANGO_THROTTLE_TOKEN_OBTAIN=50/minute
# Sentry settings
SENTRY_ENVIRONMENT=local
SENTRY_RELEASE=local
NEXT_PUBLIC_SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}

#### Prowler release version ####
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.12.2
@@ -137,4 +159,3 @@ LANGCHAIN_PROJECT=""
RSS_FEED_SOURCES='[{"id":"prowler-releases","name":"Prowler Releases","type":"github_releases","url":"https://github.com/prowler-cloud/prowler/releases.atom","enabled":true}]'
# Example with multiple sources (no trailing comma after last item):
# RSS_FEED_SOURCES='[{"id":"prowler-releases","name":"Prowler Releases","type":"github_releases","url":"https://github.com/prowler-cloud/prowler/releases.atom","enabled":true},{"id":"prowler-blog","name":"Prowler Blog","type":"blog","url":"https://prowler.com/blog/rss","enabled":false}]'
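The retry knobs above (`TASK_RETRY_ATTEMPTS`, `TASK_RETRY_DELAY_SECONDS`, and the commented `POSTGRES_REPLICA_RETRY_*` pair) suggest bounded retry loops with a configurable base delay. The consuming code is not part of this diff; the following is only a minimal sketch of how such settings are typically read, assuming exponential backoff and a hypothetical helper name `retry_with_backoff`:

```python
import os
import time


def retry_with_backoff(operation, attempts=None, base_delay=None):
    """Retry `operation`, sleeping base_delay * 2**attempt between tries.

    Illustrative only: the variable names come from the .env above, but the
    real consumer code is not shown in this diff.
    """
    attempts = attempts or int(os.environ.get("TASK_RETRY_ATTEMPTS", "5"))
    base_delay = base_delay or float(os.environ.get("TASK_RETRY_DELAY_SECONDS", "0.1"))
    for attempt in range(attempts):
        try:
            return operation()
        except Exception:
            if attempt == attempts - 1:
                raise  # out of retries: surface the last error
            time.sleep(base_delay * (2 ** attempt))
```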
@@ -3,14 +3,20 @@ name: 'API: Container Build and Push'
on:
  push:
    branches:
      - 'master'
      - 'attack-paths-demo'
    paths:
      - 'api/**'
      - 'prowler/**'
      - '.github/workflows/api-build-lint-push-containers.yml'
      - '.github/workflows/api-container-build-push.yml'
  release:
    types:
      - 'published'
  workflow_dispatch:
    inputs:
      release_tag:
        description: 'Release tag (e.g., 5.14.0)'
        required: true
        type: string

permissions:
  contents: read
@@ -21,8 +27,8 @@ concurrency:

env:
  # Tags
  LATEST_TAG: latest
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  LATEST_TAG: attack-paths-demo
  RELEASE_TAG: ${{ github.event.release.tag_name || inputs.release_tag }}
  STABLE_TAG: stable
  WORKING_DIRECTORY: ./api

@@ -44,7 +50,16 @@ jobs:

  container-build-push:
    needs: setup
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    timeout-minutes: 30
    permissions:
      contents: read
@@ -63,20 +78,8 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push API container (latest)
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
@@ -89,21 +92,21 @@ jobs:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Build and push API container (release)
        if: github.event_name == 'release'
      - name: Build and push API container for ${{ matrix.arch }}
        id: container-push
        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true
          platforms: ${{ matrix.platform }}
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
@@ -117,6 +120,52 @@ jobs:
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  # Create and push multi-architecture manifest
  create-manifest:
    needs: [setup, container-build-push]
    if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
    runs-on: ubuntu-latest

    steps:
      - name: Login to DockerHub
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Create and push manifests for push event
        if: github.event_name == 'push'
        run: |
          docker buildx imagetools create \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

      - name: Create and push manifests for release event
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        run: |
          docker buildx imagetools create \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

      - name: Install regctl
        if: always()
        uses: regclient/actions/regctl-installer@f61d18f46c86af724a9c804cb9ff2a6fec741c7c # main

      - name: Cleanup intermediate architecture tags
        if: always()
        run: |
          echo "Cleaning up intermediate tags..."
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64" || true
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
          echo "Cleanup completed"

  trigger-deployment:
    if: github.event_name == 'push'
    needs: [setup, container-build-push]
@@ -0,0 +1,39 @@
name: 'Tools: Comment Label Update'

on:
  issue_comment:
    types:
      - 'created'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.issue.number }}
  cancel-in-progress: false

jobs:
  update-labels:
    if: contains(github.event.issue.labels.*.name, 'status/awaiting-response')
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      issues: write
      pull-requests: write

    steps:
      - name: Remove 'status/awaiting-response' label
        env:
          GH_TOKEN: ${{ github.token }}
          ISSUE_NUMBER: ${{ github.event.issue.number }}
        run: |
          echo "Removing 'status/awaiting-response' label from #$ISSUE_NUMBER"
          gh api /repos/${{ github.repository }}/issues/$ISSUE_NUMBER/labels/status%2Fawaiting-response \
            -X DELETE

      - name: Add 'status/waiting-for-revision' label
        env:
          GH_TOKEN: ${{ github.token }}
          ISSUE_NUMBER: ${{ github.event.issue.number }}
        run: |
          echo "Adding 'status/waiting-for-revision' label to #$ISSUE_NUMBER"
          gh api /repos/${{ github.repository }}/issues/$ISSUE_NUMBER/labels \
            -X POST \
            -f labels[]='status/waiting-for-revision'
@@ -1,45 +0,0 @@
name: Community PR labelling

on:
  # We need "write" permissions on the PR to be able to add a label.
  pull_request_target: # We need this to have labelling permissions. There are no user inputs here, so we should be fine.
    types:
      - opened

permissions: {}

jobs:
  label-if-community:
    name: Add 'community' label if the PR is from a community contributor
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write

    steps:
      - name: Check if author is org member
        id: check_membership
        env:
          GH_TOKEN: ${{ github.token }}
          AUTHOR: ${{ github.event.pull_request.user.login }}
          ORG: ${{ github.repository_owner }}
        run: |
          echo "Checking if $AUTHOR is a member of $ORG"
          if gh api --method GET "orgs/$ORG/members/$AUTHOR" >/dev/null 2>&1; then
            echo "is_member=true" >> $GITHUB_OUTPUT
            echo "$AUTHOR is an organization member"
          else
            echo "is_member=false" >> $GITHUB_OUTPUT
            echo "$AUTHOR is not an organization member"
          fi

      - name: Add community label
        if: steps.check_membership.outputs.is_member == 'false'
        env:
          PR_NUMBER: ${{ github.event.pull_request.number }}
          GH_TOKEN: ${{ github.token }}
        run: |
          echo "Adding 'community' label to PR #$PR_NUMBER"
          gh api /repos/${{ github.repository }}/issues/${{ github.event.number }}/labels \
            -X POST \
            -f labels[]='community'
@@ -27,3 +27,66 @@ jobs:
        uses: actions/labeler@634933edcd8ababfe52f92936142cc22ac488b1b # v6.0.1
        with:
          sync-labels: true

  label-community:
    name: Add 'community' label if the PR is from a community contributor
    needs: labeler
    if: github.repository == 'prowler-cloud/prowler' && github.event.action == 'opened'
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write

    steps:
      - name: Check if author is org member
        id: check_membership
        env:
          AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          # Hardcoded list of prowler-cloud organization members
          # This list includes members who have set their organization membership as private
          ORG_MEMBERS=(
            "AdriiiPRodri"
            "Alan-TheGentleman"
            "alejandrobailo"
            "amitsharm"
            "andoniaf"
            "cesararroba"
            "Chan9390"
            "danibarranqueroo"
            "HugoPBrito"
            "jfagoagas"
            "josemazo"
            "lydiavilchez"
            "mmuller88"
            "MrCloudSec"
            "pedrooot"
            "prowler-bot"
            "puchy22"
            "rakan-pro"
            "RosaRivasProwler"
            "StylusFrost"
            "toniblyx"
            "vicferpoy"
          )

          echo "Checking if $AUTHOR is a member of prowler-cloud organization"

          # Check if author is in the org members list
          if printf '%s\n' "${ORG_MEMBERS[@]}" | grep -q "^${AUTHOR}$"; then
            echo "is_member=true" >> $GITHUB_OUTPUT
            echo "$AUTHOR is an organization member"
          else
            echo "is_member=false" >> $GITHUB_OUTPUT
            echo "$AUTHOR is not an organization member"
          fi

      - name: Add community label
        if: steps.check_membership.outputs.is_member == 'false'
        env:
          PR_NUMBER: ${{ github.event.pull_request.number }}
          GH_TOKEN: ${{ github.token }}
        run: |
          echo "Adding 'community' label to PR #$PR_NUMBER"
          gh api /repos/${{ github.repository }}/issues/${{ github.event.number }}/labels \
            -X POST \
            -f labels[]='community'
@@ -10,6 +10,12 @@ on:
  release:
    types:
      - 'published'
  workflow_dispatch:
    inputs:
      release_tag:
        description: 'Release tag (e.g., 5.14.0)'
        required: true
        type: string

permissions:
  contents: read
@@ -21,7 +27,7 @@ concurrency:
env:
  # Tags
  LATEST_TAG: latest
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  RELEASE_TAG: ${{ github.event.release.tag_name || inputs.release_tag }}
  STABLE_TAG: stable
  WORKING_DIRECTORY: ./mcp_server

@@ -43,7 +49,16 @@ jobs:

  container-build-push:
    needs: setup
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    timeout-minutes: 30
    permissions:
      contents: read
@@ -61,27 +76,8 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push MCP container (latest)
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}
          labels: |
            org.opencontainers.image.title=Prowler MCP Server
            org.opencontainers.image.description=Model Context Protocol server for Prowler
            org.opencontainers.image.vendor=ProwlerPro, Inc.
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.created=${{ github.event.head_commit.timestamp }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
@@ -94,29 +90,29 @@ jobs:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Build and push MCP container (release)
        if: github.event_name == 'release'
      - name: Build and push MCP container for ${{ matrix.arch }}
        id: container-push
        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true
          platforms: ${{ matrix.platform }}
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-${{ matrix.arch }}
          labels: |
            org.opencontainers.image.title=Prowler MCP Server
            org.opencontainers.image.description=Model Context Protocol server for Prowler
            org.opencontainers.image.vendor=ProwlerPro, Inc.
            org.opencontainers.image.version=${{ env.RELEASE_TAG }}
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.created=${{ github.event.release.published_at }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
            org.opencontainers.image.created=${{ github.event_name == 'release' && github.event.release.published_at || github.event.head_commit.timestamp }}
            ${{ github.event_name == 'release' && format('org.opencontainers.image.version={0}', env.RELEASE_TAG) || '' }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
@@ -130,6 +126,52 @@ jobs:
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  # Create and push multi-architecture manifest
  create-manifest:
    needs: [setup, container-build-push]
    if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
    runs-on: ubuntu-latest

    steps:
      - name: Login to DockerHub
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Create and push manifests for push event
        if: github.event_name == 'push'
        run: |
          docker buildx imagetools create \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

      - name: Create and push manifests for release event
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        run: |
          docker buildx imagetools create \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

      - name: Install regctl
        if: always()
        uses: regclient/actions/regctl-installer@main

      - name: Cleanup intermediate architecture tags
        if: always()
        run: |
          echo "Cleaning up intermediate tags..."
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64" || true
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
          echo "Cleanup completed"

  trigger-deployment:
    if: github.event_name == 'push'
    needs: [setup, container-build-push]
@@ -16,6 +16,12 @@ on:
  release:
    types:
      - 'published'
  workflow_dispatch:
    inputs:
      release_tag:
        description: 'Release tag (e.g., 5.14.0)'
        required: true
        type: string

permissions:
  contents: read
@@ -46,7 +52,16 @@ env:
jobs:
  container-build-push:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    timeout-minutes: 45
    permissions:
      contents: read
@@ -54,6 +69,8 @@ jobs:
    outputs:
      prowler_version: ${{ steps.get-prowler-version.outputs.prowler_version }}
      prowler_version_major: ${{ steps.get-prowler-version.outputs.prowler_version_major }}
      latest_tag: ${{ steps.get-prowler-version.outputs.latest_tag }}
      stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
    env:
      POETRY_VIRTUALENVS_CREATE: 'false'

@@ -88,16 +105,22 @@ jobs:
          3)
            echo "LATEST_TAG=v3-latest" >> "${GITHUB_ENV}"
            echo "STABLE_TAG=v3-stable" >> "${GITHUB_ENV}"
            echo "latest_tag=v3-latest" >> "${GITHUB_OUTPUT}"
            echo "stable_tag=v3-stable" >> "${GITHUB_OUTPUT}"
            echo "✓ Prowler v3 detected - tags: v3-latest, v3-stable"
            ;;
          4)
            echo "LATEST_TAG=v4-latest" >> "${GITHUB_ENV}"
            echo "STABLE_TAG=v4-stable" >> "${GITHUB_ENV}"
            echo "latest_tag=v4-latest" >> "${GITHUB_OUTPUT}"
            echo "stable_tag=v4-stable" >> "${GITHUB_OUTPUT}"
            echo "✓ Prowler v4 detected - tags: v4-latest, v4-stable"
            ;;
          5)
            echo "LATEST_TAG=latest" >> "${GITHUB_ENV}"
            echo "STABLE_TAG=stable" >> "${GITHUB_ENV}"
            echo "latest_tag=latest" >> "${GITHUB_OUTPUT}"
            echo "stable_tag=stable" >> "${GITHUB_OUTPUT}"
            echo "✓ Prowler v5 detected - tags: latest, stable"
            ;;
          *)
@@ -124,22 +147,8 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push SDK container (latest)
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: .
          file: ${{ env.DOCKERFILE_PATH }}
          push: true
          tags: |
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
@@ -152,26 +161,22 @@ jobs:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Build and push SDK container (release)
        if: github.event_name == 'release'
      - name: Build and push SDK container for ${{ matrix.arch }}
        id: container-push
        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: .
          file: ${{ env.DOCKERFILE_PATH }}
          push: true
          platforms: ${{ matrix.platform }}
          tags: |
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.PROWLER_VERSION }}
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.PROWLER_VERSION }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.PROWLER_VERSION }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
@@ -185,6 +190,66 @@ jobs:
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  # Create and push multi-architecture manifest
  create-manifest:
    needs: [container-build-push]
    if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
    runs-on: ubuntu-latest

    steps:
      - name: Login to DockerHub
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to Public ECR
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          registry: public.ecr.aws
          username: ${{ secrets.PUBLIC_ECR_AWS_ACCESS_KEY_ID }}
          password: ${{ secrets.PUBLIC_ECR_AWS_SECRET_ACCESS_KEY }}
        env:
          AWS_REGION: ${{ env.AWS_REGION }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Create and push manifests for push event
        if: github.event_name == 'push'
        run: |
          docker buildx imagetools create \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64

      - name: Create and push manifests for release event
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        run: |
          docker buildx imagetools create \
            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
            -t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.prowler_version }} \
            -t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ needs.container-build-push.outputs.stable_tag }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.prowler_version }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.stable_tag }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64

      - name: Install regctl
        if: always()
        uses: regclient/actions/regctl-installer@main

      - name: Cleanup intermediate architecture tags
        if: always()
        run: |
          echo "Cleaning up intermediate tags..."
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-amd64" || true
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.container-build-push.outputs.latest_tag }}-arm64" || true
          echo "Cleanup completed"

  dispatch-v3-deployment:
    if: needs.container-build-push.outputs.prowler_version_major == '3'
    needs: container-build-push
@@ -10,6 +10,12 @@ on:
  release:
    types:
      - 'published'
  workflow_dispatch:
    inputs:
      release_tag:
        description: 'Release tag (e.g., 5.14.0)'
        required: true
        type: string

permissions:
  contents: read
@@ -21,7 +27,7 @@ concurrency:
env:
  # Tags
  LATEST_TAG: latest
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  RELEASE_TAG: ${{ github.event.release.tag_name || inputs.release_tag }}
  STABLE_TAG: stable
  WORKING_DIRECTORY: ./ui

@@ -46,7 +52,16 @@ jobs:

  container-build-push:
    needs: setup
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    timeout-minutes: 30
    permissions:
      contents: read
@@ -65,23 +80,8 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push UI container (latest)
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ needs.setup.outputs.short-sha }}
            NEXT_PUBLIC_API_BASE_URL=${{ env.NEXT_PUBLIC_API_BASE_URL }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
@@ -94,24 +94,24 @@ jobs:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Build and push UI container (release)
        if: github.event_name == 'release'
      - name: Build and push UI container for ${{ matrix.arch }}
        id: container-push
        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${{ env.RELEASE_TAG }}
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && format('v{0}', env.RELEASE_TAG) || needs.setup.outputs.short-sha }}
            NEXT_PUBLIC_API_BASE_URL=${{ env.NEXT_PUBLIC_API_BASE_URL }}
          push: true
          platforms: ${{ matrix.platform }}
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        if: (github.event_name == 'release' || github.event_name == 'workflow_dispatch') && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
@@ -125,6 +125,52 @@ jobs:
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  # Create and push multi-architecture manifest
  create-manifest:
    needs: [setup, container-build-push]
    if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
    runs-on: ubuntu-latest

    steps:
      - name: Login to DockerHub
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Create and push manifests for push event
        if: github.event_name == 'push'
        run: |
          docker buildx imagetools create \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

      - name: Create and push manifests for release event
        if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
        run: |
          docker buildx imagetools create \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }} \
            -t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }} \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64 \
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64

      - name: Install regctl
        if: always()
        uses: regclient/actions/regctl-installer@main

      - name: Cleanup intermediate architecture tags
        if: always()
        run: |
          echo "Cleaning up intermediate tags..."
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-amd64" || true
          regctl tag delete "${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-arm64" || true
          echo "Cleanup completed"

  trigger-deployment:
    if: github.event_name == 'push'
    needs: [setup, container-build-push]
@@ -45,21 +45,86 @@ pytest_*.xml
.coverage
htmlcov/

# VSCode files
# VSCode files and settings
.vscode/
*.code-workspace
.vscode-test/

# Cursor files
# VSCode extension settings and workspaces
.history/
.ionide/

# MCP Server Settings (various locations)
**/cline_mcp_settings.json
**/mcp_settings.json
**/mcp-config.json
**/mcpServers.json
.mcp/

# AI Coding Assistants - Cursor
.cursorignore
.cursor/
.cursorrules

# RooCode files
# AI Coding Assistants - RooCode
.roo/
.rooignore
.roomodes

# Cline files
# AI Coding Assistants - Cline (formerly Claude Dev)
.cline/
.clineignore
.clinerules

# AI Coding Assistants - Continue
.continue/
continue.json
.continuerc
.continuerc.json

# AI Coding Assistants - GitHub Copilot
.copilot/
.github/copilot/

# AI Coding Assistants - Amazon Q Developer (formerly CodeWhisperer)
.aws/
.codewhisperer/
.amazonq/
.aws-toolkit/

# AI Coding Assistants - Tabnine
.tabnine/
tabnine_config.json

# AI Coding Assistants - Kiro
.kiro/
.kiroignore
kiro.config.json

# AI Coding Assistants - Aider
.aider/
.aider.chat.history.md
.aider.input.history
.aider.tags.cache.v3/

# AI Coding Assistants - Windsurf
.windsurf/
.windsurfignore

# AI Coding Assistants - Replit Agent
.replit
.replitignore

# AI Coding Assistants - Supermaven
.supermaven/

# AI Coding Assistants - Sourcegraph Cody
.cody/

# AI Coding Assistants - General
.ai/
.aiconfig
ai-config.json

# Terraform
.terraform*
@@ -126,3 +126,12 @@ repos:
        entry: bash -c 'vulture --exclude "contrib,.venv,api/src/backend/api/tests/,api/src/backend/conftest.py,api/src/backend/tasks/tests/" --min-confidence 100 .'
        language: system
        files: '.*\.py'

      - id: ui-checks
        name: UI - Husky Pre-commit
        description: "Run UI pre-commit checks (Claude Code validation + healthcheck)"
        entry: bash -c 'cd ui && .husky/pre-commit'
        language: system
        files: '^ui/.*\.(ts|tsx|js|jsx|json|css)$'
        pass_filenames: false
        verbose: true
@@ -4,6 +4,10 @@ LABEL maintainer="https://github.com/prowler-cloud/prowler"
LABEL org.opencontainers.image.source="https://github.com/prowler-cloud/prowler"

ARG POWERSHELL_VERSION=7.5.0
ENV POWERSHELL_VERSION=${POWERSHELL_VERSION}

ARG TRIVY_VERSION=0.66.0
ENV TRIVY_VERSION=${TRIVY_VERSION}

# hadolint ignore=DL3008
RUN apt-get update && apt-get install -y --no-install-recommends \
@@ -25,6 +29,24 @@ RUN ARCH=$(uname -m) && \
    ln -s /opt/microsoft/powershell/7/pwsh /usr/bin/pwsh && \
    rm /tmp/powershell.tar.gz

# Install Trivy for IaC scanning
RUN ARCH=$(uname -m) && \
    if [ "$ARCH" = "x86_64" ]; then \
        TRIVY_ARCH="Linux-64bit" ; \
    elif [ "$ARCH" = "aarch64" ]; then \
        TRIVY_ARCH="Linux-ARM64" ; \
    else \
        echo "Unsupported architecture for Trivy: $ARCH" && exit 1 ; \
    fi && \
    wget --progress=dot:giga "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_${TRIVY_ARCH}.tar.gz" -O /tmp/trivy.tar.gz && \
    tar zxf /tmp/trivy.tar.gz -C /tmp && \
    mv /tmp/trivy /usr/local/bin/trivy && \
    chmod +x /usr/local/bin/trivy && \
    rm /tmp/trivy.tar.gz && \
    # Create trivy cache directory with proper permissions
    mkdir -p /tmp/.cache/trivy && \
    chmod 777 /tmp/.cache/trivy

# Add prowler user
RUN addgroup --gid 1000 prowler && \
    adduser --uid 1000 --gid 1000 --disabled-password --gecos "" prowler
@@ -75,6 +75,23 @@ prowler dashboard
```



## Attack Paths

Attack Paths automatically extends every completed AWS scan with a Neo4j graph that combines Cartography's cloud inventory with Prowler findings. The feature runs in the API worker after each scan and therefore requires:

- An accessible Neo4j instance (the Docker Compose files already ship a `neo4j` service).
- The following environment variables so Django and Celery can connect (a connectivity sketch follows this section):

| Variable | Description | Default |
| --- | --- | --- |
| `NEO4J_HOST` | Hostname used by the API containers. | `neo4j` |
| `NEO4J_PORT` | Bolt port exposed by Neo4j. | `7687` |
| `NEO4J_USER` / `NEO4J_PASSWORD` | Credentials with rights to create per-tenant databases. | `neo4j` / `neo4j_password` |

Every AWS provider scan will enqueue an Attack Paths ingestion job automatically. Other cloud providers will be added in future iterations.
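Before enabling the feature it can help to confirm that the API containers can actually reach Neo4j. A minimal connectivity check using the official `neo4j` Python driver and the defaults documented above (this snippet is illustrative and not part of the repository):

```python
import os

from neo4j import GraphDatabase  # pip install neo4j

# Values mirror the defaults documented in the table above.
uri = f"bolt://{os.environ.get('NEO4J_HOST', 'neo4j')}:{os.environ.get('NEO4J_PORT', '7687')}"
auth = (
    os.environ.get("NEO4J_USER", "neo4j"),
    os.environ.get("NEO4J_PASSWORD", "neo4j_password"),
)

with GraphDatabase.driver(uri, auth=auth) as driver:
    driver.verify_connectivity()  # raises if the service is unreachable
    print("Neo4j is reachable")
```

If `verify_connectivity()` raises, check that the `neo4j` service is running and that the Bolt port matches `NEO4J_PORT`.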

# Prowler at a Glance
> [!Tip]
> For the most accurate and up-to-date information about checks, services, frameworks, and categories, visit [**Prowler Hub**](https://hub.prowler.com).
@@ -82,15 +99,15 @@ prowler dashboard

| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) | Support | Interface |
|---|---|---|---|---|---|---|
| AWS | 576 | 82 | 38 | 10 | Official | UI, API, CLI |
| GCP | 79 | 13 | 12 | 3 | Official | UI, API, CLI |
| Azure | 162 | 19 | 12 | 4 | Official | UI, API, CLI |
| AWS | 576 | 82 | 39 | 10 | Official | UI, API, CLI |
| GCP | 79 | 13 | 13 | 3 | Official | UI, API, CLI |
| Azure | 162 | 19 | 13 | 4 | Official | UI, API, CLI |
| Kubernetes | 83 | 7 | 5 | 7 | Official | UI, API, CLI |
| GitHub | 17 | 2 | 1 | 0 | Official | UI, API, CLI |
| M365 | 70 | 7 | 3 | 2 | Official | UI, API, CLI |
| OCI | 51 | 13 | 1 | 10 | Official | UI, API, CLI |
| IaC | [See `trivy` docs.](https://trivy.dev/latest/docs/coverage/iac/) | N/A | N/A | N/A | Official | UI, API, CLI |
| MongoDB Atlas | 10 | 3 | 0 | 0 | Official | CLI, API |
| MongoDB Atlas | 10 | 3 | 0 | 0 | Official | UI, API, CLI |
| LLM | [See `promptfoo` docs.](https://www.promptfoo.dev/docs/red-team/plugins/) | N/A | N/A | N/A | Official | CLI |
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |
@@ -2,7 +2,25 @@

All notable changes to the **Prowler API** are documented in this file.

## [1.15.0] (Prowler UNRELEASED)
## [1.16.0] (Unreleased)

### Added
- Attack Paths backend support [(#9344)](https://github.com/prowler-cloud/prowler/pull/9344)

### Changed
- Restore the compliance overview endpoint's mandatory filters [(#9330)](https://github.com/prowler-cloud/prowler/pull/9330)

---

## [1.15.1] (Prowler v5.14.1)

### Fixed
- Fix typo in PDF reporting [(#9345)](https://github.com/prowler-cloud/prowler/pull/9345)
- Fix IaC provider initialization failure when mutelist processor is configured [(#9331)](https://github.com/prowler-cloud/prowler/pull/9331)

---

## [1.15.0] (Prowler v5.14.0)

### Added
- IaC (Infrastructure as Code) provider support for remote repositories [(#8751)](https://github.com/prowler-cloud/prowler/pull/8751)
@@ -14,16 +32,33 @@ All notable changes to the **Prowler API** are documented in this file.
- Support muting findings based on simple rules with custom reason [(#9051)](https://github.com/prowler-cloud/prowler/pull/9051)
- Support C5 compliance framework for the GCP provider [(#9097)](https://github.com/prowler-cloud/prowler/pull/9097)
- Support for Amazon Bedrock and OpenAI compatible providers in Lighthouse AI [(#8957)](https://github.com/prowler-cloud/prowler/pull/8957)
- Support PDF reporting for ENS compliance framework [(#9158)](https://github.com/prowler-cloud/prowler/pull/9158)
- Support PDF reporting for NIS2 compliance framework [(#9170)](https://github.com/prowler-cloud/prowler/pull/9170)
- Tenant-wide ThreatScore overview aggregation and snapshot persistence with backfill support [(#9148)](https://github.com/prowler-cloud/prowler/pull/9148)
- Added `metadata`, `details`, and `partition` attributes to `/resources` endpoint & `details`, and `partition` to `/findings` endpoint [(#9098)](https://github.com/prowler-cloud/prowler/pull/9098)
- Support for MongoDB Atlas provider [(#9167)](https://github.com/prowler-cloud/prowler/pull/9167)
- Support Prowler ThreatScore for the K8S provider [(#9235)](https://github.com/prowler-cloud/prowler/pull/9235)
- Enhanced compliance overview endpoint with provider filtering and latest scan aggregation [(#9244)](https://github.com/prowler-cloud/prowler/pull/9244)
- New endpoint `GET /api/v1/overview/regions` to retrieve aggregated findings data by region [(#9273)](https://github.com/prowler-cloud/prowler/pull/9273)

### Changed
- Optimized database write queries for scan related tasks [(#9190)](https://github.com/prowler-cloud/prowler/pull/9190)
- Date filters are now optional for `GET /api/v1/overviews/services` endpoint; returns latest scan data by default [(#9248)](https://github.com/prowler-cloud/prowler/pull/9248)

### Fixed
- Scans no longer fail when findings have UIDs exceeding 300 characters; such findings are now skipped with detailed logging [(#9246)](https://github.com/prowler-cloud/prowler/pull/9246)
- Updated unique constraint for `Provider` model to exclude soft-deleted entries, resolving duplicate errors when re-deleting providers [(#9054)](https://github.com/prowler-cloud/prowler/pull/9054)
- Removed compliance generation for providers without compliance frameworks [(#9208)](https://github.com/prowler-cloud/prowler/pull/9208)
- Refresh output report timestamps for each scan [(#9272)](https://github.com/prowler-cloud/prowler/pull/9272)
- Severity overview endpoint now ignores muted findings as expected [(#9283)](https://github.com/prowler-cloud/prowler/pull/9283)
- Fixed discrepancy between ThreatScore PDF report values and database calculations [(#9296)](https://github.com/prowler-cloud/prowler/pull/9296)

### Security
- Django updated to the latest 5.1 security release, 5.1.14, due to problems with potential [SQL injection](https://github.com/prowler-cloud/prowler/security/dependabot/113) and [denial-of-service vulnerability](https://github.com/prowler-cloud/prowler/security/dependabot/114) [(#9176)](https://github.com/prowler-cloud/prowler/pull/9176)

---

## [1.14.2] (Prowler 5.13.2)

### Fixed
- Update unique constraint for `Provider` model to exclude soft-deleted entries, resolving duplicate errors when re-deleting providers.

## [1.14.1] (Prowler 5.13.1)
## [1.14.1] (Prowler v5.13.1)

### Fixed
- `/api/v1/overviews/providers` collapses data by provider type so the UI receives a single aggregated record per cloud family even when multiple accounts exist [(#9053)](https://github.com/prowler-cloud/prowler/pull/9053)
@@ -32,7 +67,7 @@ All notable changes to the **Prowler API** are documented in this file.

---

## [1.14.0] (Prowler 5.13.0)
## [1.14.0] (Prowler v5.13.0)

### Added
- Default JWT keys are generated and stored if they are missing from configuration [(#8655)](https://github.com/prowler-cloud/prowler/pull/8655)
@@ -56,14 +91,14 @@ All notable changes to the **Prowler API** are documented in this file.

---

## [1.13.2] (Prowler 5.12.3)
## [1.13.2] (Prowler v5.12.3)

### Fixed
- 500 error when deleting user [(#8731)](https://github.com/prowler-cloud/prowler/pull/8731)

---

## [1.13.1] (Prowler 5.12.2)
## [1.13.1] (Prowler v5.12.2)

### Changed
- Renamed compliance overview task queue to `compliance` [(#8755)](https://github.com/prowler-cloud/prowler/pull/8755)
@@ -73,7 +108,7 @@ All notable changes to the **Prowler API** are documented in this file.

---

## [1.13.0] (Prowler 5.12.0)
## [1.13.0] (Prowler v5.12.0)

### Added
- Integration with JIRA, enabling sending findings to a JIRA project [(#8622)](https://github.com/prowler-cloud/prowler/pull/8622), [(#8637)](https://github.com/prowler-cloud/prowler/pull/8637)
@@ -82,7 +117,7 @@ All notable changes to the **Prowler API** are documented in this file.

---

## [1.12.0] (Prowler 5.11.0)
## [1.12.0] (Prowler v5.11.0)

### Added
- Lighthouse support for OpenAI GPT-5 [(#8527)](https://github.com/prowler-cloud/prowler/pull/8527)
@@ -94,7 +129,7 @@ All notable changes to the **Prowler API** are documented in this file.

---

## [1.11.0] (Prowler 5.10.0)
## [1.11.0] (Prowler v5.10.0)

### Added
- Github provider support [(#8271)](https://github.com/prowler-cloud/prowler/pull/8271)
@@ -7,7 +7,7 @@ authors = [{name = "Prowler Engineering", email = "engineering@prowler.com"}]
dependencies = [
    "celery[pytest] (>=5.4.0,<6.0.0)",
    "dj-rest-auth[with_social,jwt] (==7.0.1)",
    "django (==5.1.13)",
    "django (==5.1.14)",
    "django-allauth[saml] (>=65.8.0,<66.0.0)",
    "django-celery-beat (>=2.7.0,<3.0.0)",
    "django-celery-results (>=2.5.1,<3.0.0)",
@@ -24,7 +24,7 @@ dependencies = [
    "drf-spectacular-jsonapi==0.5.1",
    "gunicorn==23.0.0",
    "lxml==5.3.2",
    "prowler @ git+https://github.com/prowler-cloud/prowler.git@master",
    "prowler @ git+https://github.com/prowler-cloud/prowler.git@attack-paths-demo",
    "psycopg2-binary==2.9.9",
    "pytest-celery[redis] (>=1.0.1,<2.0.0)",
    "sentry-sdk[django] (>=2.20.0,<3.0.0)",
@@ -35,7 +35,9 @@ dependencies = [
    "markdown (>=3.9,<4.0)",
    "drf-simple-apikey (==2.2.1)",
    "matplotlib (>=3.10.6,<4.0.0)",
    "reportlab (>=4.4.4,<5.0.0)"
    "reportlab (>=4.4.4,<5.0.0)",
    "neo4j (<6.0.0)",
    "cartography @ git+https://github.com/prowler-cloud/cartography@master",
]
description = "Prowler's API (Django/DRF)"
license = "Apache-2.0"
@@ -1,4 +1,5 @@
import logging
import atexit
import os
import sys
from pathlib import Path
@@ -30,6 +31,7 @@ class ApiConfig(AppConfig):
    def ready(self):
        from api import schema_extensions  # noqa: F401
        from api import signals  # noqa: F401
        from api.attack_paths import database as graph_database
        from api.compliance import load_prowler_compliance

        # Generate required cryptographic keys if not present, but only if:
@@ -39,6 +41,10 @@ class ApiConfig(AppConfig):
        if "manage.py" not in sys.argv or os.environ.get("RUN_MAIN"):
            self._ensure_crypto_keys()

        if not getattr(settings, "TESTING", False):
            graph_database.init_driver()
            atexit.register(graph_database.close_driver)

        load_prowler_compliance()

    def _ensure_crypto_keys(self):
@@ -54,7 +60,7 @@ class ApiConfig(AppConfig):
        global _keys_initialized

        # Skip key generation if running tests
        if hasattr(settings, "TESTING") and settings.TESTING:
        if getattr(settings, "TESTING", False):
            return

        # Skip if already initialized in this process
@@ -0,0 +1,13 @@
from api.attack_paths.query_definitions import (
    AttackPathsQueryDefinition,
    AttackPathsQueryParameterDefinition,
    get_queries_for_provider,
    get_query_by_id,
)

__all__ = [
    "AttackPathsQueryDefinition",
    "AttackPathsQueryParameterDefinition",
    "get_queries_for_provider",
    "get_query_by_id",
]
@@ -0,0 +1,144 @@
import logging
import threading

from contextlib import contextmanager
from typing import Iterator
from uuid import UUID

import neo4j
import neo4j.exceptions

from django.conf import settings

from api.attack_paths.retryable_session import RetryableSession

# Silence the Neo4j driver logger; otherwise Celery workers flood the logs
logging.getLogger("neo4j").setLevel(logging.ERROR)
logging.getLogger("neo4j").propagate = False

SERVICE_UNAVAILABLE_MAX_RETRIES = 3

# Module-level, process-wide driver singleton
_driver: neo4j.Driver | None = None
_lock = threading.Lock()

# Base Neo4j functions


def get_uri() -> str:
    host = settings.DATABASES["neo4j"]["HOST"]
    port = settings.DATABASES["neo4j"]["PORT"]
    return f"bolt://{host}:{port}"


def init_driver() -> neo4j.Driver:
    global _driver
    if _driver is not None:
        return _driver

    with _lock:
        if _driver is None:
            uri = get_uri()
            config = settings.DATABASES["neo4j"]

            _driver = neo4j.GraphDatabase.driver(
                uri, auth=(config["USER"], config["PASSWORD"])
            )
            _driver.verify_connectivity()

    return _driver


def get_driver() -> neo4j.Driver:
    return init_driver()


def close_driver() -> None:
    global _driver
    with _lock:
        if _driver is not None:
            try:
                _driver.close()
            finally:
                _driver = None


@contextmanager
def get_session(database: str | None = None) -> Iterator[RetryableSession]:
    session_wrapper: RetryableSession | None = None

    try:
        session_wrapper = RetryableSession(
            session_factory=lambda: get_driver().session(database=database),
            close_driver=close_driver,  # Passed as a callable to avoid circular imports
            max_retries=SERVICE_UNAVAILABLE_MAX_RETRIES,
        )
        yield session_wrapper

    except neo4j.exceptions.Neo4jError as exc:
        raise GraphDatabaseQueryException(message=exc.message, code=exc.code)

    finally:
        if session_wrapper is not None:
            session_wrapper.close()


def create_database(database: str) -> None:
    query = "CREATE DATABASE $database IF NOT EXISTS"
    parameters = {"database": database}

    with get_session() as session:
        session.run(query, parameters)


def drop_database(database: str) -> None:
    query = f"DROP DATABASE `{database}` IF EXISTS DESTROY DATA"

    with get_session() as session:
        session.run(query)


def drop_subgraph(database: str, root_node_label: str, root_node_id: str) -> int:
    query = """
    MATCH (a:__ROOT_NODE_LABEL__ {id: $root_node_id})
    CALL apoc.path.subgraphNodes(a, {})
    YIELD node
    DETACH DELETE node
    RETURN COUNT(node) AS deleted_nodes_count
    """.replace("__ROOT_NODE_LABEL__", root_node_label)
    parameters = {"root_node_id": root_node_id}

    with get_session(database) as session:
        result = session.run(query, parameters)

        try:
            return result.single()["deleted_nodes_count"]

        except neo4j.exceptions.ResultConsumedError:
            return 0  # There were no nodes to delete, so the result is empty


# Neo4j functions related to Prowler + Cartography

DATABASE_NAME_TEMPLATE = "db-{attack_paths_scan_id}"


def get_database_name(attack_paths_scan_id: UUID) -> str:
    attack_paths_scan_id_str = str(attack_paths_scan_id).lower()
    return DATABASE_NAME_TEMPLATE.format(attack_paths_scan_id=attack_paths_scan_id_str)


# Exceptions


class GraphDatabaseQueryException(Exception):
    def __init__(self, message: str, code: str | None = None) -> None:
        super().__init__(message)
        self.message = message
        self.code = code

    def __str__(self) -> str:
        if self.code:
            return f"{self.code}: {self.message}"

        return self.message
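A minimal usage sketch of the module above. It assumes a configured Django context with a `"neo4j"` entry in `DATABASES` (as read by `get_uri()`/`init_driver()`); the Cypher query and `AWSAccount` label are illustrative, not part of the module:

```python
from uuid import UUID

from api.attack_paths import database as graph_database

scan_id = UUID("a7f0f6de-6f8e-4b3a-8cbe-3f6dd9012345")
db_name = graph_database.get_database_name(scan_id)  # -> "db-a7f0f6de-..."

# get_session() yields a RetryableSession and converts Neo4jError into
# GraphDatabaseQueryException on the way out.
with graph_database.get_session(db_name) as session:
    result = session.run(
        "MATCH (a:AWSAccount {id: $uid}) RETURN a.name AS name",  # illustrative query
        {"uid": "123456789012"},
    )
    for record in result:
        print(record["name"])
```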
@@ -0,0 +1,87 @@
import logging

from collections.abc import Callable
from typing import Any

import neo4j
import neo4j.exceptions

logger = logging.getLogger(__name__)


class RetryableSession:
    """
    Wrapper around `neo4j.Session` that retries `neo4j.exceptions.ServiceUnavailable` errors.
    """

    def __init__(
        self,
        session_factory: Callable[[], neo4j.Session],
        close_driver: Callable[[], None],  # Just to avoid circular imports
        max_retries: int,
    ) -> None:
        self._session_factory = session_factory
        self._close_driver = close_driver
        self._max_retries = max(0, max_retries)
        self._session = self._session_factory()

    def close(self) -> None:
        if self._session is not None:
            self._session.close()
            self._session = None

    def __enter__(self) -> "RetryableSession":
        return self

    def __exit__(self, exc_type: Any, exc: Any, exc_tb: Any) -> None:
        self.close()

    def run(self, *args: Any, **kwargs: Any) -> Any:
        return self._call_with_retry("run", *args, **kwargs)

    def write_transaction(self, *args: Any, **kwargs: Any) -> Any:
        return self._call_with_retry("write_transaction", *args, **kwargs)

    def read_transaction(self, *args: Any, **kwargs: Any) -> Any:
        return self._call_with_retry("read_transaction", *args, **kwargs)

    def execute_write(self, *args: Any, **kwargs: Any) -> Any:
        return self._call_with_retry("execute_write", *args, **kwargs)

    def execute_read(self, *args: Any, **kwargs: Any) -> Any:
        return self._call_with_retry("execute_read", *args, **kwargs)

    def __getattr__(self, item: str) -> Any:
        return getattr(self._session, item)

    def _call_with_retry(self, method_name: str, *args: Any, **kwargs: Any) -> Any:
        attempt = 0
        last_exc: neo4j.exceptions.ServiceUnavailable | None = None

        while attempt <= self._max_retries:
            try:
                method = getattr(self._session, method_name)
                return method(*args, **kwargs)

            except (
                neo4j.exceptions.ServiceUnavailable
            ) as exc:  # pragma: no cover - depends on infra
                last_exc = exc
                attempt += 1

                if attempt > self._max_retries:
                    raise

                logger.warning(
                    f"Neo4j session {method_name} failed with ServiceUnavailable ({attempt}/{self._max_retries} attempts). Retrying..."
                )
                self._refresh_session()

        raise last_exc if last_exc else RuntimeError("Unexpected retry loop exit")

    def _refresh_session(self) -> None:
        if self._session is not None:
            self._session.close()

        self._close_driver()
        self._session = self._session_factory()
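A self-contained sketch of the retry-and-refresh behavior, using a fake session in place of a real Neo4j connection (the `FakeSession` class and counters are invented for illustration):

```python
import neo4j.exceptions

from api.attack_paths.retryable_session import RetryableSession


class FakeSession:
    """Fails with ServiceUnavailable a fixed number of times, then succeeds."""

    def __init__(self, fail_times):
        self.fail_times = fail_times

    def run(self, *args, **kwargs):
        if self.fail_times > 0:
            self.fail_times -= 1
            raise neo4j.exceptions.ServiceUnavailable("node down")
        return "ok"

    def close(self):
        pass


created = {"count": 0}


def factory():
    created["count"] += 1
    # The first session fails once; replacement sessions succeed
    return FakeSession(fail_times=1 if created["count"] == 1 else 0)


session = RetryableSession(factory, close_driver=lambda: None, max_retries=3)
print(session.run("MATCH (n) RETURN n"))  # "ok", after one retry
print(created["count"])  # 2 — the wrapper rebuilt the session once
```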
@@ -0,0 +1,143 @@
import logging

from typing import Any

from rest_framework.exceptions import APIException, ValidationError

from api.attack_paths import database as graph_database, AttackPathsQueryDefinition
from api.models import AttackPathsScan
from config.custom_logging import BackendLogger

logger = logging.getLogger(BackendLogger.API)


def normalize_run_payload(raw_data):
    if not isinstance(raw_data, dict):  # Let the serializer handle this
        return raw_data

    if "data" in raw_data and isinstance(raw_data.get("data"), dict):
        data_section = raw_data.get("data") or {}
        attributes = data_section.get("attributes") or {}
        payload = {
            "id": attributes.get("id", data_section.get("id")),
            "parameters": attributes.get("parameters"),
        }

        # Remove `None` parameters to allow defaults downstream
        if payload.get("parameters") is None:
            payload.pop("parameters")
        return payload

    return raw_data


def prepare_query_parameters(
    definition: AttackPathsQueryDefinition,
    provided_parameters: dict[str, Any],
    provider_uid: str,
) -> dict[str, Any]:
    parameters = dict(provided_parameters or {})
    expected_names = {parameter.name for parameter in definition.parameters}
    provided_names = set(parameters.keys())

    unexpected = provided_names - expected_names
    if unexpected:
        raise ValidationError(
            {"parameters": f"Unknown parameter(s): {', '.join(sorted(unexpected))}"}
        )

    missing = expected_names - provided_names
    if missing:
        raise ValidationError(
            {
                "parameters": f"Missing required parameter(s): {', '.join(sorted(missing))}"
            }
        )

    clean_parameters = {
        "provider_uid": str(provider_uid),
    }

    for definition_parameter in definition.parameters:
        raw_value = provided_parameters[definition_parameter.name]

        try:
            casted_value = definition_parameter.cast(raw_value)

        except (ValueError, TypeError) as exc:
            raise ValidationError(
                {
                    "parameters": (
                        f"Invalid value for parameter `{definition_parameter.name}`: {str(exc)}"
                    )
                }
            )

        clean_parameters[definition_parameter.name] = casted_value

    return clean_parameters


def execute_attack_paths_query(
    attack_paths_scan: AttackPathsScan,
    definition: AttackPathsQueryDefinition,
    parameters: dict[str, Any],
) -> dict[str, Any]:
    try:
        with graph_database.get_session(attack_paths_scan.graph_database) as session:
            result = session.run(definition.cypher, parameters)
            return _serialize_graph(result.graph())

    except graph_database.GraphDatabaseQueryException as exc:
        logger.error(f"Query failed for Attack Paths query `{definition.id}`: {exc}")
        raise APIException(
            "Attack Paths query execution failed due to a database error"
        )


def _serialize_graph(graph):
    nodes = []
    for node in graph.nodes:
        nodes.append(
            {
                "id": node.element_id,
                "labels": list(node.labels),
                "properties": _serialize_properties(node._properties),
            },
        )

    relationships = []
    for relationship in graph.relationships:
        relationships.append(
            {
                "id": relationship.element_id,
                "label": relationship.type,
                "source": relationship.start_node.element_id,
                "target": relationship.end_node.element_id,
                "properties": _serialize_properties(relationship._properties),
            },
        )

    return {
        "nodes": nodes,
        "relationships": relationships,
    }


def _serialize_properties(properties: dict[str, Any]) -> dict[str, Any]:
    """Convert Neo4j property values into JSON-serializable primitives."""

    def _serialize_value(value: Any) -> Any:
        # Neo4j temporal and spatial values expose `to_native` returning Python primitives
        if hasattr(value, "to_native") and callable(value.to_native):
            return _serialize_value(value.to_native())

        if isinstance(value, (list, tuple)):
            return [_serialize_value(item) for item in value]

        if isinstance(value, dict):
            return {key: _serialize_value(val) for key, val in value.items()}

        return value

    return {key: _serialize_value(val) for key, val in properties.items()}
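A short sketch of `prepare_query_parameters` in use, runnable inside a configured Django context. The `SimpleNamespace` stand-ins only mirror the attribute shape the helper touches (`.name`, `.cast`, `.parameters`, `.cypher`); real definitions come from `api.attack_paths.query_definitions`, and the query id is invented:

```python
from types import SimpleNamespace

from api.attack_paths import views_helpers

# Stand-ins mirroring the shape the helper expects (illustrative only)
limit_param = SimpleNamespace(name="limit", cast=int)
definition = SimpleNamespace(
    id="aws-public-rds",  # hypothetical query id
    cypher="MATCH (n) RETURN n LIMIT $limit",
    parameters=[limit_param],
)

clean = views_helpers.prepare_query_parameters(
    definition,
    {"limit": "5"},  # values typically arrive as strings from the API
    provider_uid="123456789012",
)
print(clean)  # {'provider_uid': '123456789012', 'limit': 5}
```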
@@ -144,6 +144,7 @@ def generate_scan_compliance(
    Returns:
        None: This function modifies the compliance_overview in place.
    """

    for compliance_id in PROWLER_CHECKS[provider_type][check_id]:
        for requirement in compliance_overview[compliance_id]["requirements"].values():
            if check_id in requirement["checks"]:
@@ -27,6 +27,7 @@ from api.models import (
    Finding,
    Integration,
    Invitation,
    AttackPathsScan,
    LighthouseProviderConfiguration,
    LighthouseProviderModels,
    Membership,
@@ -47,6 +48,7 @@ from api.models import (
    StatusChoices,
    Task,
    TenantAPIKey,
    ThreatScoreSnapshot,
    User,
)
from api.rls import Tenant
@@ -329,6 +331,23 @@ class ScanFilter(ProviderRelationshipFilterSet):
    }


class AttackPathsScanFilter(ProviderRelationshipFilterSet):
    inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
    completed_at = DateFilter(field_name="completed_at", lookup_expr="date")
    started_at = DateFilter(field_name="started_at", lookup_expr="date")
    state = ChoiceFilter(choices=StateChoices.choices)
    state__in = ChoiceInFilter(
        field_name="state", choices=StateChoices.choices, lookup_expr="in"
    )

    class Meta:
        model = AttackPathsScan
        fields = {
            "provider": ["exact", "in"],
            "scan": ["exact", "in"],
        }


class TaskFilter(FilterSet):
    name = CharFilter(field_name="task_runner_task__task_name", lookup_expr="exact")
    name__icontains = CharFilter(
@@ -811,7 +830,8 @@ class ScanSummarySeverityFilter(ScanSummaryFilter):
        elif value == OverviewStatusChoices.PASS:
            return queryset.annotate(status_count=F("_pass"))
        else:
-           return queryset.annotate(status_count=F("total"))
+           # Exclude muted findings by default
+           return queryset.annotate(status_count=F("_pass") + F("fail"))

    def filter_status_in(self, queryset, name, value):
        # Validate the status values
@@ -820,7 +840,7 @@ class ScanSummarySeverityFilter(ScanSummaryFilter):
            if status_val not in valid_statuses:
                raise ValidationError(f"Invalid status value: {status_val}")

-       # If all statuses or no valid statuses, use total
+       # If all statuses or no valid statuses, exclude muted findings (pass + fail)
        if (
            set(value)
            >= {
@@ -829,7 +849,7 @@ class ScanSummarySeverityFilter(ScanSummaryFilter):
            }
            or not value
        ):
-           return queryset.annotate(status_count=F("total"))
+           return queryset.annotate(status_count=F("_pass") + F("fail"))

        # Build the sum expression based on status values
        sum_expression = None
@@ -847,7 +867,7 @@ class ScanSummarySeverityFilter(ScanSummaryFilter):
                sum_expression = sum_expression + field_expr

        if sum_expression is None:
-           return queryset.annotate(status_count=F("total"))
+           return queryset.annotate(status_count=F("_pass") + F("fail"))

        return queryset.annotate(status_count=sum_expression)

@@ -859,26 +879,6 @@ class ScanSummarySeverityFilter(ScanSummaryFilter):
    }


-class ServiceOverviewFilter(ScanSummaryFilter):
-    def is_valid(self):
-        # Check if at least one of the inserted_at filters is present
-        inserted_at_filters = [
-            self.data.get("inserted_at"),
-            self.data.get("inserted_at__gte"),
-            self.data.get("inserted_at__lte"),
-        ]
-        if not any(inserted_at_filters):
-            raise ValidationError(
-                {
-                    "inserted_at": [
-                        "At least one of filter[inserted_at], filter[inserted_at__gte], or "
-                        "filter[inserted_at__lte] is required."
-                    ]
-                }
-            )
-        return super().is_valid()
-
-
class IntegrationFilter(FilterSet):
    inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
    integration_type = ChoiceFilter(choices=Integration.IntegrationChoices.choices)
@@ -998,3 +998,36 @@ class MuteRuleFilter(FilterSet):
        "inserted_at": ["gte", "lte"],
        "updated_at": ["gte", "lte"],
    }


class ThreatScoreSnapshotFilter(FilterSet):
    """
    Filter for ThreatScore snapshots.
    Allows filtering by scan, provider, compliance_id, and date ranges.
    """

    inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
    scan_id = UUIDFilter(field_name="scan__id", lookup_expr="exact")
    scan_id__in = UUIDInFilter(field_name="scan__id", lookup_expr="in")
    provider_id = UUIDFilter(field_name="provider__id", lookup_expr="exact")
    provider_id__in = UUIDInFilter(field_name="provider__id", lookup_expr="in")
    provider_type = ChoiceFilter(
        field_name="provider__provider", choices=Provider.ProviderChoices.choices
    )
    provider_type__in = ChoiceInFilter(
        field_name="provider__provider",
        choices=Provider.ProviderChoices.choices,
        lookup_expr="in",
    )
    compliance_id = CharFilter(field_name="compliance_id", lookup_expr="exact")
    compliance_id__in = CharInFilter(field_name="compliance_id", lookup_expr="in")

    class Meta:
        model = ThreatScoreSnapshot
        fields = {
            "scan": ["exact", "in"],
            "provider": ["exact", "in"],
            "compliance_id": ["exact", "in"],
            "inserted_at": ["date", "gte", "lte"],
            "overall_score": ["exact", "gte", "lte"],
        }
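A hedged sketch of querying the new filters over HTTP. The host and endpoint path are assumptions (they are not shown in this diff), but the `filter[...]` names match `AttackPathsScanFilter` above in the usual JSON:API style:

```python
import requests

BASE = "http://localhost:8080/api/v1"  # assumed host and prefix

params = {
    "filter[state__in]": "executing,completed",
    "filter[inserted_at]": "2024-10-18",
    "filter[provider]": "15fce1fa-ecaa-433f-a9dc-62553f3a2555",
}
response = requests.get(
    f"{BASE}/attack-paths-scans",  # assumed route for the new resource
    params=params,
    headers={"Authorization": "Bearer <token>"},
)
print(response.status_code)
```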
@@ -0,0 +1,41 @@
[
    {
        "model": "api.attackpathsscan",
        "pk": "a7f0f6de-6f8e-4b3a-8cbe-3f6dd9012345",
        "fields": {
            "tenant": "12646005-9067-4d2a-a098-8bb378604362",
            "provider": "b85601a8-4b45-4194-8135-03fb980ef428",
            "scan": "01920573-aa9c-73c9-bcda-f2e35c9b19d2",
            "state": "completed",
            "progress": 100,
            "update_tag": 1693586667,
            "graph_database": "db-a7f0f6de-6f8e-4b3a-8cbe-3f6dd9012345",
            "is_graph_database_deleted": false,
            "task": null,
            "inserted_at": "2024-09-01T17:24:37Z",
            "updated_at": "2024-09-01T17:44:37Z",
            "started_at": "2024-09-01T17:34:37Z",
            "completed_at": "2024-09-01T17:44:37Z",
            "duration": 269,
            "ingestion_exceptions": {}
        }
    },
    {
        "model": "api.attackpathsscan",
        "pk": "4a2fb2af-8a60-4d7d-9cae-4ca65e098765",
        "fields": {
            "tenant": "12646005-9067-4d2a-a098-8bb378604362",
            "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555",
            "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f",
            "state": "executing",
            "progress": 48,
            "update_tag": 1697625000,
            "graph_database": "db-4a2fb2af-8a60-4d7d-9cae-4ca65e098765",
            "is_graph_database_deleted": false,
            "task": null,
            "inserted_at": "2024-10-18T10:55:57Z",
            "updated_at": "2024-10-18T10:56:15Z",
            "started_at": "2024-10-18T10:56:05Z"
        }
    }
]
@@ -22,7 +22,7 @@ class Migration(migrations.Migration):
                    ("kubernetes", "Kubernetes"),
                    ("m365", "M365"),
                    ("github", "GitHub"),
-                   ("oci", "Oracle Cloud Infrastructure"),
+                   ("oraclecloud", "Oracle Cloud Infrastructure"),
                    ("iac", "IaC"),
                ],
                default="aws",
@@ -29,4 +29,8 @@ class Migration(migrations.Migration):
                default="aws",
            ),
        ),
        migrations.RunSQL(
            "ALTER TYPE provider ADD VALUE IF NOT EXISTS 'mongodbatlas';",
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]
@@ -0,0 +1,170 @@
# Generated by Django 5.1.13 on 2025-10-31 09:04

import uuid

import django.db.models.deletion
from django.db import migrations, models

import api.rls


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0056_remove_provider_unique_provider_uids_and_more"),
    ]

    operations = [
        migrations.CreateModel(
            name="ThreatScoreSnapshot",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("inserted_at", models.DateTimeField(auto_now_add=True)),
                (
                    "compliance_id",
                    models.CharField(
                        help_text="Compliance framework ID (e.g., 'prowler_threatscore_aws')",
                        max_length=100,
                    ),
                ),
                (
                    "overall_score",
                    models.DecimalField(
                        decimal_places=2,
                        help_text="Overall ThreatScore percentage (0-100)",
                        max_digits=5,
                    ),
                ),
                (
                    "score_delta",
                    models.DecimalField(
                        blank=True,
                        decimal_places=2,
                        help_text="Score change compared to previous snapshot (positive = improvement)",
                        max_digits=5,
                        null=True,
                    ),
                ),
                (
                    "section_scores",
                    models.JSONField(
                        blank=True,
                        default=dict,
                        help_text="ThreatScore breakdown by section",
                    ),
                ),
                (
                    "critical_requirements",
                    models.JSONField(
                        blank=True,
                        default=list,
                        help_text="List of critical failed requirements (risk >= 4)",
                    ),
                ),
                (
                    "total_requirements",
                    models.IntegerField(
                        default=0, help_text="Total number of requirements evaluated"
                    ),
                ),
                (
                    "passed_requirements",
                    models.IntegerField(
                        default=0, help_text="Number of requirements with PASS status"
                    ),
                ),
                (
                    "failed_requirements",
                    models.IntegerField(
                        default=0, help_text="Number of requirements with FAIL status"
                    ),
                ),
                (
                    "manual_requirements",
                    models.IntegerField(
                        default=0, help_text="Number of requirements with MANUAL status"
                    ),
                ),
                (
                    "total_findings",
                    models.IntegerField(
                        default=0,
                        help_text="Total number of findings across all requirements",
                    ),
                ),
                (
                    "passed_findings",
                    models.IntegerField(
                        default=0, help_text="Number of findings with PASS status"
                    ),
                ),
                (
                    "failed_findings",
                    models.IntegerField(
                        default=0, help_text="Number of findings with FAIL status"
                    ),
                ),
                (
                    "provider",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="threatscore_snapshots",
                        related_query_name="threatscore_snapshot",
                        to="api.provider",
                    ),
                ),
                (
                    "scan",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="threatscore_snapshots",
                        related_query_name="threatscore_snapshot",
                        to="api.scan",
                    ),
                ),
                (
                    "tenant",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
                    ),
                ),
            ],
            options={
                "db_table": "threatscore_snapshots",
                "abstract": False,
            },
        ),
        migrations.AddIndex(
            model_name="threatscoresnapshot",
            index=models.Index(
                fields=["tenant_id", "scan_id"], name="threatscore_snap_t_scan_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="threatscoresnapshot",
            index=models.Index(
                fields=["tenant_id", "provider_id"], name="threatscore_snap_t_prov_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="threatscoresnapshot",
            index=models.Index(
                fields=["tenant_id", "inserted_at"], name="threatscore_snap_t_time_idx"
            ),
        ),
        migrations.AddConstraint(
            model_name="threatscoresnapshot",
            constraint=api.rls.RowLevelSecurityConstraint(
                "tenant_id",
                name="rls_on_threatscoresnapshot",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
    ]
@@ -0,0 +1,29 @@
from django.contrib.postgres.operations import RemoveIndexConcurrently
from django.db import migrations


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0057_threatscoresnapshot"),
    ]

    operations = [
        RemoveIndexConcurrently(
            model_name="compliancerequirementoverview",
            name="cro_tenant_scan_idx",
        ),
        RemoveIndexConcurrently(
            model_name="compliancerequirementoverview",
            name="cro_scan_comp_idx",
        ),
        RemoveIndexConcurrently(
            model_name="compliancerequirementoverview",
            name="cro_scan_comp_req_idx",
        ),
        RemoveIndexConcurrently(
            model_name="compliancerequirementoverview",
            name="cro_scan_comp_req_reg_idx",
        ),
    ]
@@ -0,0 +1,75 @@
# Generated by Django 5.1.13 on 2025-10-30 15:23

import uuid

import django.db.models.deletion
from django.db import migrations, models

import api.rls


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0058_drop_redundant_compliance_requirement_indexes"),
    ]

    operations = [
        migrations.CreateModel(
            name="ComplianceOverviewSummary",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("inserted_at", models.DateTimeField(auto_now_add=True)),
                ("compliance_id", models.TextField()),
                ("requirements_passed", models.IntegerField(default=0)),
                ("requirements_failed", models.IntegerField(default=0)),
                ("requirements_manual", models.IntegerField(default=0)),
                ("total_requirements", models.IntegerField(default=0)),
                (
                    "scan",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="compliance_summaries",
                        related_query_name="compliance_summary",
                        to="api.scan",
                    ),
                ),
                (
                    "tenant",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
                    ),
                ),
            ],
            options={
                "db_table": "compliance_overview_summaries",
                "abstract": False,
                "indexes": [
                    models.Index(
                        fields=["tenant_id", "scan_id"], name="cos_tenant_scan_idx"
                    )
                ],
                "constraints": [
                    models.UniqueConstraint(
                        fields=("tenant_id", "scan_id", "compliance_id"),
                        name="unique_compliance_summary_per_scan",
                    )
                ],
            },
        ),
        migrations.AddConstraint(
            model_name="complianceoverviewsummary",
            constraint=api.rls.RowLevelSecurityConstraint(
                "tenant_id",
                name="rls_on_complianceoverviewsummary",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
    ]
@@ -0,0 +1,154 @@
# Generated by Django 5.1.13 on 2025-11-06 16:20

import django.db.models.deletion

from django.db import migrations, models
from uuid6 import uuid7

import api.db_utils
import api.rls


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0059_compliance_overview_summary"),
    ]

    operations = [
        migrations.CreateModel(
            name="AttackPathsScan",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid7,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("inserted_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "state",
                    api.db_utils.StateEnumField(
                        choices=[
                            ("available", "Available"),
                            ("scheduled", "Scheduled"),
                            ("executing", "Executing"),
                            ("completed", "Completed"),
                            ("failed", "Failed"),
                            ("cancelled", "Cancelled"),
                        ],
                        default="available",
                    ),
                ),
                ("progress", models.IntegerField(default=0)),
                ("started_at", models.DateTimeField(blank=True, null=True)),
                ("completed_at", models.DateTimeField(blank=True, null=True)),
                (
                    "duration",
                    models.IntegerField(
                        blank=True, help_text="Duration in seconds", null=True
                    ),
                ),
                (
                    "update_tag",
                    models.BigIntegerField(
                        blank=True,
                        help_text="Cartography update tag (epoch)",
                        null=True,
                    ),
                ),
                (
                    "graph_database",
                    models.CharField(blank=True, max_length=63, null=True),
                ),
                (
                    "is_graph_database_deleted",
                    models.BooleanField(default=False),
                ),
                (
                    "ingestion_exceptions",
                    models.JSONField(blank=True, default=dict, null=True),
                ),
                (
                    "provider",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="attack_paths_scans",
                        related_query_name="attack_paths_scan",
                        to="api.provider",
                    ),
                ),
                (
                    "scan",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="attack_paths_scans",
                        related_query_name="attack_paths_scan",
                        to="api.scan",
                    ),
                ),
                (
                    "task",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="attack_paths_scans",
                        related_query_name="attack_paths_scan",
                        to="api.task",
                    ),
                ),
                (
                    "tenant",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
                    ),
                ),
            ],
            options={
                "db_table": "attack_paths_scans",
                "abstract": False,
                "indexes": [
                    models.Index(
                        fields=["tenant_id", "provider_id", "-inserted_at"],
                        name="aps_prov_ins_desc_idx",
                    ),
                    models.Index(
                        fields=["tenant_id", "state", "-inserted_at"],
                        name="aps_state_ins_desc_idx",
                    ),
                    models.Index(
                        fields=["tenant_id", "scan_id"],
                        name="aps_scan_lookup_idx",
                    ),
                    models.Index(
                        fields=["tenant_id", "provider_id"],
                        name="aps_active_graph_idx",
                        include=["graph_database", "id"],
                        condition=models.Q(("is_graph_database_deleted", False)),
                    ),
                    models.Index(
                        fields=["tenant_id", "provider_id", "-completed_at"],
                        name="aps_completed_graph_idx",
                        include=["graph_database", "id"],
                        condition=models.Q(
                            ("state", "completed"),
                            ("is_graph_database_deleted", False),
                        ),
                    ),
                ],
            },
        ),
        migrations.AddConstraint(
            model_name="attackpathsscan",
            constraint=api.rls.RowLevelSecurityConstraint(
                "tenant_id",
                name="rls_on_attackpathsscan",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
    ]
@@ -616,6 +616,101 @@ class Scan(RowLevelSecurityProtectedModel):
        resource_name = "scans"


class AttackPathsScan(RowLevelSecurityProtectedModel):
    objects = ActiveProviderManager()
    all_objects = models.Manager()

    id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)

    state = StateEnumField(choices=StateChoices.choices, default=StateChoices.AVAILABLE)
    progress = models.IntegerField(default=0)

    # Timing
    started_at = models.DateTimeField(null=True, blank=True)
    completed_at = models.DateTimeField(null=True, blank=True)
    duration = models.IntegerField(
        null=True, blank=True, help_text="Duration in seconds"
    )

    # Relationships to the provider and the optional Prowler Scan and Celery Task
    provider = models.ForeignKey(
        "Provider",
        on_delete=models.CASCADE,
        related_name="attack_paths_scans",
        related_query_name="attack_paths_scan",
    )
    scan = models.ForeignKey(
        "Scan",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="attack_paths_scans",
        related_query_name="attack_paths_scan",
    )
    task = models.ForeignKey(
        "Task",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="attack_paths_scans",
        related_query_name="attack_paths_scan",
    )

    # Cartography-specific metadata
    update_tag = models.BigIntegerField(
        null=True, blank=True, help_text="Cartography update tag (epoch)"
    )
    graph_database = models.CharField(max_length=63, null=True, blank=True)
    is_graph_database_deleted = models.BooleanField(default=False)
    ingestion_exceptions = models.JSONField(default=dict, null=True, blank=True)

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "attack_paths_scans"

        constraints = [
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]

        indexes = [
            models.Index(
                fields=["tenant_id", "provider_id", "-inserted_at"],
                name="aps_prov_ins_desc_idx",
            ),
            models.Index(
                fields=["tenant_id", "state", "-inserted_at"],
                name="aps_state_ins_desc_idx",
            ),
            models.Index(
                fields=["tenant_id", "scan_id"],
                name="aps_scan_lookup_idx",
            ),
            models.Index(
                fields=["tenant_id", "provider_id"],
                name="aps_active_graph_idx",
                include=["graph_database", "id"],
                condition=Q(is_graph_database_deleted=False),
            ),
            models.Index(
                fields=["tenant_id", "provider_id", "-completed_at"],
                name="aps_completed_graph_idx",
                include=["graph_database", "id"],
                condition=Q(
                    state=StateChoices.COMPLETED,
                    is_graph_database_deleted=False,
                ),
            ),
        ]

    class JSONAPIMeta:
        resource_name = "attack-paths-scans"


class ResourceTag(RowLevelSecurityProtectedModel):
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
@@ -1371,35 +1466,70 @@ class ComplianceRequirementOverview(RowLevelSecurityProtectedModel):
            ),
        ]
        indexes = [
-           models.Index(fields=["tenant_id", "scan_id"], name="cro_tenant_scan_idx"),
-           models.Index(
-               fields=["tenant_id", "scan_id", "compliance_id"],
-               name="cro_scan_comp_idx",
-           ),
            models.Index(
                fields=["tenant_id", "scan_id", "compliance_id", "region"],
                name="cro_scan_comp_reg_idx",
            ),
-           models.Index(
-               fields=["tenant_id", "scan_id", "compliance_id", "requirement_id"],
-               name="cro_scan_comp_req_idx",
-           ),
-           models.Index(
-               fields=[
-                   "tenant_id",
-                   "scan_id",
-                   "compliance_id",
-                   "requirement_id",
-                   "region",
-               ],
-               name="cro_scan_comp_req_reg_idx",
-           ),
        ]

    class JSONAPIMeta:
        resource_name = "compliance-requirements-overviews"


class ComplianceOverviewSummary(RowLevelSecurityProtectedModel):
    """
    Pre-aggregated compliance overview computed across ALL regions.
    One row per (scan_id, compliance_id) combination.

    This table optimizes the common case where users view overall compliance
    without filtering by region. For region-specific views, the detailed
    ComplianceRequirementOverview table is used instead.
    """

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)

    scan = models.ForeignKey(
        Scan,
        on_delete=models.CASCADE,
        related_name="compliance_summaries",
        related_query_name="compliance_summary",
    )

    compliance_id = models.TextField(blank=False)

    # Pre-aggregated scores (computed across ALL regions)
    requirements_passed = models.IntegerField(default=0)
    requirements_failed = models.IntegerField(default=0)
    requirements_manual = models.IntegerField(default=0)
    total_requirements = models.IntegerField(default=0)

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "compliance_overview_summaries"

        constraints = [
            models.UniqueConstraint(
                fields=("tenant_id", "scan_id", "compliance_id"),
                name="unique_compliance_summary_per_scan",
            ),
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "DELETE"],
            ),
        ]

        indexes = [
            models.Index(
                fields=["tenant_id", "scan_id"],
                name="cos_tenant_scan_idx",
            ),
        ]

    class JSONAPIMeta:
        resource_name = "compliance-overview-summaries"


class ScanSummary(RowLevelSecurityProtectedModel):
    objects = ActiveProviderManager()
    all_objects = models.Manager()
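A sketch of the read path the summary table enables: totals come straight from one row per compliance framework instead of aggregating the detailed `ComplianceRequirementOverview` rows at request time. The helper name and call site are illustrative:

```python
from api.models import ComplianceOverviewSummary


def compliance_totals(tenant_id, scan_id):
    """Return per-framework requirement counts for one scan (sketch)."""
    rows = ComplianceOverviewSummary.objects.filter(
        tenant_id=tenant_id, scan_id=scan_id
    ).values(
        "compliance_id",
        "requirements_passed",
        "requirements_failed",
        "requirements_manual",
        "total_requirements",
    )
    # One dict per (scan, compliance) pair, keyed by framework id
    return {row["compliance_id"]: row for row in rows}
```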
@@ -2239,3 +2369,134 @@ class LighthouseProviderModels(RowLevelSecurityProtectedModel):

    class JSONAPIMeta:
        resource_name = "lighthouse-models"


class ThreatScoreSnapshot(RowLevelSecurityProtectedModel):
    """
    Stores historical ThreatScore metrics for a given scan.
    Snapshots are created automatically after each ThreatScore report generation.
    """

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)

    scan = models.ForeignKey(
        Scan,
        on_delete=models.CASCADE,
        related_name="threatscore_snapshots",
        related_query_name="threatscore_snapshot",
    )

    provider = models.ForeignKey(
        Provider,
        on_delete=models.CASCADE,
        related_name="threatscore_snapshots",
        related_query_name="threatscore_snapshot",
    )

    compliance_id = models.CharField(
        max_length=100,
        blank=False,
        null=False,
        help_text="Compliance framework ID (e.g., 'prowler_threatscore_aws')",
    )

    # Overall ThreatScore metrics
    overall_score = models.DecimalField(
        max_digits=5,
        decimal_places=2,
        help_text="Overall ThreatScore percentage (0-100)",
    )

    # Score improvement/degradation compared to the previous snapshot
    score_delta = models.DecimalField(
        max_digits=5,
        decimal_places=2,
        null=True,
        blank=True,
        help_text="Score change compared to previous snapshot (positive = improvement)",
    )

    # Section breakdown stored as JSON
    # Format: {"1. IAM": 85.5, "2. Attack Surface": 92.3, ...}
    section_scores = models.JSONField(
        default=dict,
        blank=True,
        help_text="ThreatScore breakdown by section",
    )

    # Critical requirements metadata stored as JSON
    # Format: [{"requirement_id": "...", "risk_level": 5, "weight": 150, ...}, ...]
    critical_requirements = models.JSONField(
        default=list,
        blank=True,
        help_text="List of critical failed requirements (risk >= 4)",
    )

    # Summary statistics
    total_requirements = models.IntegerField(
        default=0,
        help_text="Total number of requirements evaluated",
    )

    passed_requirements = models.IntegerField(
        default=0,
        help_text="Number of requirements with PASS status",
    )

    failed_requirements = models.IntegerField(
        default=0,
        help_text="Number of requirements with FAIL status",
    )

    manual_requirements = models.IntegerField(
        default=0,
        help_text="Number of requirements with MANUAL status",
    )

    total_findings = models.IntegerField(
        default=0,
        help_text="Total number of findings across all requirements",
    )

    passed_findings = models.IntegerField(
        default=0,
        help_text="Number of findings with PASS status",
    )

    failed_findings = models.IntegerField(
        default=0,
        help_text="Number of findings with FAIL status",
    )

    def __str__(self):
        return f"ThreatScore {self.overall_score}% for scan {self.scan_id} ({self.inserted_at})"

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "threatscore_snapshots"

        constraints = [
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]

        indexes = [
            models.Index(
                fields=["tenant_id", "scan_id"],
                name="threatscore_snap_t_scan_idx",
            ),
            models.Index(
                fields=["tenant_id", "provider_id"],
                name="threatscore_snap_t_prov_idx",
            ),
            models.Index(
                fields=["tenant_id", "inserted_at"],
                name="threatscore_snap_t_time_idx",
            ),
        ]

    class JSONAPIMeta:
        resource_name = "threatscore-snapshots"
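A sketch of how `score_delta` could be derived when a new snapshot is written; the actual snapshot-creation logic is not part of this diff, so the helper below is an assumption built only on the model fields above:

```python
from decimal import Decimal

from api.models import ThreatScoreSnapshot


def build_snapshot_kwargs(provider, compliance_id, overall_score: Decimal):
    """Sketch: compare against the latest snapshot for the same provider/framework."""
    previous = (
        ThreatScoreSnapshot.objects
        .filter(provider=provider, compliance_id=compliance_id)
        .order_by("-inserted_at")
        .first()
    )
    delta = None
    if previous is not None:
        # Positive = improvement, matching the field's help_text
        delta = overall_score - previous.overall_score
    return {"overall_score": overall_score, "score_delta": delta}
```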
@@ -0,0 +1,172 @@
from types import SimpleNamespace
from unittest.mock import MagicMock, patch

import pytest

from rest_framework.exceptions import APIException, ValidationError

from api.attack_paths import database as graph_database
from api.attack_paths import views_helpers


def test_normalize_run_payload_extracts_attributes_section():
    payload = {
        "data": {
            "id": "ignored",
            "attributes": {
                "id": "aws-rds",
                "parameters": {"ip": "192.0.2.0"},
            },
        }
    }

    result = views_helpers.normalize_run_payload(payload)

    assert result == {"id": "aws-rds", "parameters": {"ip": "192.0.2.0"}}


def test_normalize_run_payload_passthrough_for_non_dict():
    sentinel = "not-a-dict"
    assert views_helpers.normalize_run_payload(sentinel) is sentinel


def test_prepare_query_parameters_includes_provider_and_casts(
    attack_paths_query_definition_factory,
):
    definition = attack_paths_query_definition_factory(cast_type=int)
    result = views_helpers.prepare_query_parameters(
        definition,
        {"limit": "5"},
        provider_uid="123456789012",
    )

    assert result["provider_uid"] == "123456789012"
    assert result["limit"] == 5


@pytest.mark.parametrize(
    "provided,expected_message",
    [
        ({}, "Missing required parameter"),
        ({"limit": 10, "extra": True}, "Unknown parameter"),
    ],
)
def test_prepare_query_parameters_validates_names(
    attack_paths_query_definition_factory, provided, expected_message
):
    definition = attack_paths_query_definition_factory()

    with pytest.raises(ValidationError) as exc:
        views_helpers.prepare_query_parameters(definition, provided, provider_uid="1")

    assert expected_message in str(exc.value)


def test_prepare_query_parameters_validates_cast(
    attack_paths_query_definition_factory,
):
    definition = attack_paths_query_definition_factory(cast_type=int)

    with pytest.raises(ValidationError) as exc:
        views_helpers.prepare_query_parameters(
            definition,
            {"limit": "not-an-int"},
            provider_uid="1",
        )

    assert "Invalid value" in str(exc.value)


def test_execute_attack_paths_query_serializes_graph(
    attack_paths_query_definition_factory, attack_paths_graph_stub_classes
):
    definition = attack_paths_query_definition_factory(
        id="aws-rds",
        name="RDS",
        description="",
        cypher="MATCH (n) RETURN n",
        parameters=[],
    )
    parameters = {"provider_uid": "123"}
    attack_paths_scan = SimpleNamespace(graph_database="tenant-db")

    node = attack_paths_graph_stub_classes.Node(
        element_id="node-1",
        labels=["AWSAccount"],
        properties={
            "name": "account",
            "complex": {
                "items": [
                    attack_paths_graph_stub_classes.NativeValue("value"),
                    {"nested": 1},
                ]
            },
        },
    )
    relationship = attack_paths_graph_stub_classes.Relationship(
        element_id="rel-1",
        rel_type="OWNS",
        start_node=node,
        end_node=attack_paths_graph_stub_classes.Node("node-2", ["RDSInstance"], {}),
        properties={"weight": 1},
    )
    graph = SimpleNamespace(nodes=[node], relationships=[relationship])

    run_result = MagicMock()
    run_result.graph.return_value = graph

    session = MagicMock()
    session.run.return_value = run_result

    session_ctx = MagicMock()
    session_ctx.__enter__.return_value = session
    session_ctx.__exit__.return_value = False

    with patch(
        "api.attack_paths.views_helpers.graph_database.get_session",
        return_value=session_ctx,
    ) as mock_get_session:
        result = views_helpers.execute_attack_paths_query(
            attack_paths_scan, definition, parameters
        )

    mock_get_session.assert_called_once_with("tenant-db")
    session.run.assert_called_once_with(definition.cypher, parameters)
    assert result["nodes"][0]["id"] == "node-1"
    assert result["nodes"][0]["properties"]["complex"]["items"][0] == "value"
    assert result["relationships"][0]["label"] == "OWNS"


def test_execute_attack_paths_query_wraps_graph_errors(
    attack_paths_query_definition_factory,
):
    definition = attack_paths_query_definition_factory(
        id="aws-rds",
        name="RDS",
        description="",
        cypher="MATCH (n) RETURN n",
        parameters=[],
    )
    attack_paths_scan = SimpleNamespace(graph_database="tenant-db")
    parameters = {"provider_uid": "123"}

    class ExplodingContext:
        def __enter__(self):
            raise graph_database.GraphDatabaseQueryException("boom")

        def __exit__(self, exc_type, exc, tb):
            return False

    with (
        patch(
            "api.attack_paths.views_helpers.graph_database.get_session",
            return_value=ExplodingContext(),
        ),
        patch("api.attack_paths.views_helpers.logger") as mock_logger,
    ):
        with pytest.raises(APIException):
            views_helpers.execute_attack_paths_query(
                attack_paths_scan, definition, parameters
            )

    mock_logger.error.assert_called_once()
@@ -21,6 +21,7 @@ from prowler.providers.aws.lib.security_hub.security_hub import SecurityHubConne
from prowler.providers.azure.azure_provider import AzureProvider
from prowler.providers.gcp.gcp_provider import GcpProvider
from prowler.providers.github.github_provider import GithubProvider
from prowler.providers.iac.iac_provider import IacProvider
from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
from prowler.providers.m365.m365_provider import M365Provider
from prowler.providers.mongodbatlas.mongodbatlas_provider import MongodbatlasProvider
@@ -114,6 +115,7 @@ class TestReturnProwlerProvider:
            (Provider.ProviderChoices.GITHUB.value, GithubProvider),
            (Provider.ProviderChoices.MONGODBATLAS.value, MongodbatlasProvider),
            (Provider.ProviderChoices.ORACLECLOUD.value, OraclecloudProvider),
            (Provider.ProviderChoices.IAC.value, IacProvider),
        ],
    )
    def test_return_prowler_provider(self, provider_type, expected_provider):
@@ -254,6 +256,72 @@ class TestGetProwlerProviderKwargs:
        expected_result = {**secret_dict, "mutelist_content": {"key": "value"}}
        assert result == expected_result

    def test_get_prowler_provider_kwargs_iac_provider(self):
        """Test that IaC provider gets correct kwargs with repository URL."""
        provider_uid = "https://github.com/org/repo"
        secret_dict = {"access_token": "test_token"}
        secret_mock = MagicMock()
        secret_mock.secret = secret_dict

        provider = MagicMock()
        provider.provider = Provider.ProviderChoices.IAC.value
        provider.secret = secret_mock
        provider.uid = provider_uid

        result = get_prowler_provider_kwargs(provider)

        expected_result = {
            "scan_repository_url": provider_uid,
            "oauth_app_token": "test_token",
        }
        assert result == expected_result

    def test_get_prowler_provider_kwargs_iac_provider_without_token(self):
        """Test that IaC provider works without an access token for public repos."""
        provider_uid = "https://github.com/org/public-repo"
        secret_dict = {}
        secret_mock = MagicMock()
        secret_mock.secret = secret_dict

        provider = MagicMock()
        provider.provider = Provider.ProviderChoices.IAC.value
        provider.secret = secret_mock
        provider.uid = provider_uid

        result = get_prowler_provider_kwargs(provider)

        expected_result = {"scan_repository_url": provider_uid}
        assert result == expected_result

    def test_get_prowler_provider_kwargs_iac_provider_ignores_mutelist(self):
        """Test that IaC provider does NOT receive mutelist_content.

        IaC provider uses Trivy's built-in mutelist logic, so it should not
        receive mutelist_content even when a mutelist processor is configured.
        """
        provider_uid = "https://github.com/org/repo"
        secret_dict = {"access_token": "test_token"}
        secret_mock = MagicMock()
        secret_mock.secret = secret_dict

        mutelist_processor = MagicMock()
        mutelist_processor.configuration = {"Mutelist": {"key": "value"}}

        provider = MagicMock()
        provider.provider = Provider.ProviderChoices.IAC.value
        provider.secret = secret_mock
        provider.uid = provider_uid

        result = get_prowler_provider_kwargs(provider, mutelist_processor)

        # IaC provider should NOT have mutelist_content
        assert "mutelist_content" not in result
        expected_result = {
            "scan_repository_url": provider_uid,
            "oauth_app_token": "test_token",
        }
        assert result == expected_result

    def test_get_prowler_provider_kwargs_unsupported_provider(self):
        # Setup
        provider_uid = "provider_uid"
@@ -158,7 +158,8 @@ def get_prowler_provider_kwargs(

    if mutelist_processor:
        mutelist_content = mutelist_processor.configuration.get("Mutelist", {})
-       if mutelist_content:
+       # IaC provider doesn't support mutelist (uses Trivy's built-in logic)
+       if mutelist_content and provider.provider != Provider.ProviderChoices.IAC.value:
            prowler_provider_kwargs["mutelist_content"] = mutelist_content

    return prowler_provider_kwargs
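The guard added above condenses to a simple predicate: mutelist content is forwarded for every provider except IaC, which relies on Trivy's own muting. A standalone restatement for clarity (the function name is illustrative, not part of the codebase):

```python
def should_forward_mutelist(provider_type: str, mutelist_content: dict) -> bool:
    """Forward mutelist content unless it is empty or the provider is IaC."""
    return bool(mutelist_content) and provider_type != "iac"


assert should_forward_mutelist("aws", {"Accounts": {}})       # truthy content, non-IaC
assert not should_forward_mutelist("iac", {"Accounts": {}})   # IaC skips mutelist
assert not should_forward_mutelist("aws", {})                 # nothing to forward
```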
@@ -21,6 +21,7 @@ from rest_framework_simplejwt.tokens import RefreshToken
|
||||
from api.db_router import MainRouter
|
||||
from api.exceptions import ConflictException
|
||||
from api.models import (
|
||||
AttackPathsScan,
|
||||
Finding,
|
||||
Integration,
|
||||
IntegrationProviderRelationship,
|
||||
@@ -47,6 +48,7 @@ from api.models import (
|
||||
StatusChoices,
|
||||
Task,
|
||||
TenantAPIKey,
|
||||
ThreatScoreSnapshot,
|
||||
User,
|
||||
UserRoleRelationship,
|
||||
)
@@ -1126,6 +1128,109 @@ class ScanComplianceReportSerializer(serializers.Serializer):
        fields = ["id", "name"]


class AttackPathsScanSerializer(RLSSerializer):
    state = StateEnumSerializerField(read_only=True)
    provider_alias = serializers.SerializerMethodField(read_only=True)
    provider_type = serializers.SerializerMethodField(read_only=True)
    provider_uid = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = AttackPathsScan
        fields = [
            "id",
            "state",
            "progress",
            "provider",
            "provider_alias",
            "provider_type",
            "provider_uid",
            "scan",
            "task",
            "inserted_at",
            "started_at",
            "completed_at",
            "duration",
        ]

    included_serializers = {
        "provider": "api.v1.serializers.ProviderIncludeSerializer",
        "scan": "api.v1.serializers.ScanIncludeSerializer",
        "task": "api.v1.serializers.TaskSerializer",
    }

    def get_provider_alias(self, obj):
        provider = getattr(obj, "provider", None)
        return provider.alias if provider else None

    def get_provider_type(self, obj):
        provider = getattr(obj, "provider", None)
        return provider.provider if provider else None

    def get_provider_uid(self, obj):
        provider = getattr(obj, "provider", None)
        return provider.uid if provider else None


class AttackPathsQueryParameterSerializer(serializers.Serializer):
    name = serializers.CharField()
    label = serializers.CharField()
    data_type = serializers.CharField(default="string")
    description = serializers.CharField(allow_null=True, required=False)
    placeholder = serializers.CharField(allow_null=True, required=False)

    class JSONAPIMeta:
        resource_name = "attack-paths-query-parameter"


class AttackPathsQuerySerializer(serializers.Serializer):
    id = serializers.CharField()
    name = serializers.CharField()
    description = serializers.CharField()
    provider = serializers.CharField()
    parameters = AttackPathsQueryParameterSerializer(many=True)

    class JSONAPIMeta:
        resource_name = "attack-paths-query"


class AttackPathsQueryRunRequestSerializer(serializers.Serializer):
    id = serializers.CharField()
    parameters = serializers.DictField(
        child=serializers.JSONField(), allow_empty=True, required=False
    )

    class JSONAPIMeta:
        resource_name = "attack-paths-query-run-request"


class AttackPathsNodeSerializer(serializers.Serializer):
    id = serializers.CharField()
    labels = serializers.ListField(child=serializers.CharField())
    properties = serializers.DictField(child=serializers.JSONField())

    class JSONAPIMeta:
        resource_name = "attack-paths-query-result-node"


class AttackPathsRelationshipSerializer(serializers.Serializer):
    id = serializers.CharField()
    label = serializers.CharField()
    source = serializers.CharField()
    target = serializers.CharField()
    properties = serializers.DictField(child=serializers.JSONField())

    class JSONAPIMeta:
        resource_name = "attack-paths-query-result-relationship"


class AttackPathsQueryResultSerializer(serializers.Serializer):
    nodes = AttackPathsNodeSerializer(many=True)
    relationships = AttackPathsRelationshipSerializer(many=True)

    class JSONAPIMeta:
        resource_name = "attack-paths-query-result"
class ResourceTagSerializer(RLSSerializer):
    """
    Serializer for the ResourceTag model
@@ -1166,11 +1271,17 @@ class ResourceSerializer(RLSSerializer):
            "findings",
            "failed_findings_count",
            "url",
+            "metadata",
+            "details",
+            "partition",
        ]
        extra_kwargs = {
            "id": {"read_only": True},
            "inserted_at": {"read_only": True},
            "updated_at": {"read_only": True},
+            "metadata": {"read_only": True},
+            "details": {"read_only": True},
+            "partition": {"read_only": True},
        }

    included_serializers = {
@@ -1227,11 +1338,15 @@ class ResourceIncludeSerializer(RLSSerializer):
            "service",
            "type_",
            "tags",
+            "details",
+            "partition",
        ]
        extra_kwargs = {
            "id": {"read_only": True},
            "inserted_at": {"read_only": True},
            "updated_at": {"read_only": True},
+            "details": {"read_only": True},
+            "partition": {"read_only": True},
        }

    @extend_schema_field(
@@ -2218,6 +2333,30 @@ class OverviewServiceSerializer(serializers.Serializer):
        return {"version": "v1"}


class OverviewRegionSerializer(serializers.Serializer):
    id = serializers.SerializerMethodField()
    provider_type = serializers.CharField()
    region = serializers.CharField()
    total = serializers.IntegerField()
    _pass = serializers.IntegerField()
    fail = serializers.IntegerField()
    muted = serializers.IntegerField()

    class JSONAPIMeta:
        resource_name = "regions-overview"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["pass"] = self.fields.pop("_pass")

    def get_id(self, obj):
        """Generate a unique ID from provider_type and region."""
        return f"{obj['provider_type']}:{obj['region']}"

    def get_root_meta(self, _resource, _many):
        return {"version": "v1"}
# Schedules


@@ -3626,3 +3765,64 @@ class MuteRuleUpdateSerializer(BaseWriteSerializer):
        ):
            raise ValidationError("A mute rule with this name already exists.")
        return value


# ThreatScore Snapshots


class ThreatScoreSnapshotSerializer(RLSSerializer):
    """
    Serializer for ThreatScore snapshots.
    Read-only serializer for retrieving historical ThreatScore metrics.
    """

    id = serializers.SerializerMethodField()

    class Meta:
        model = ThreatScoreSnapshot
        fields = [
            "id",
            "inserted_at",
            "scan",
            "provider",
            "compliance_id",
            "overall_score",
            "score_delta",
            "section_scores",
            "critical_requirements",
            "total_requirements",
            "passed_requirements",
            "failed_requirements",
            "manual_requirements",
            "total_findings",
            "passed_findings",
            "failed_findings",
        ]
        extra_kwargs = {
            "id": {"read_only": True},
            "inserted_at": {"read_only": True},
            "scan": {"read_only": True},
            "provider": {"read_only": True},
            "compliance_id": {"read_only": True},
            "overall_score": {"read_only": True},
            "score_delta": {"read_only": True},
            "section_scores": {"read_only": True},
            "critical_requirements": {"read_only": True},
            "total_requirements": {"read_only": True},
            "passed_requirements": {"read_only": True},
            "failed_requirements": {"read_only": True},
            "manual_requirements": {"read_only": True},
            "total_findings": {"read_only": True},
            "passed_findings": {"read_only": True},
            "failed_findings": {"read_only": True},
        }

    included_serializers = {
        "scan": "api.v1.serializers.ScanIncludeSerializer",
        "provider": "api.v1.serializers.ProviderIncludeSerializer",
    }

    def get_id(self, obj):
        if getattr(obj, "_aggregated", False):
            return "n/a"
        return str(obj.id)
@@ -4,6 +4,7 @@ from drf_spectacular.views import SpectacularRedocView
from rest_framework_nested import routers

from api.v1.views import (
+    AttackPathsScanViewSet,
    ComplianceOverviewViewSet,
    CustomSAMLLoginView,
    CustomTokenObtainView,
@@ -53,6 +54,9 @@ router.register(r"tenants", TenantViewSet, basename="tenant")
router.register(r"providers", ProviderViewSet, basename="provider")
router.register(r"provider-groups", ProviderGroupViewSet, basename="providergroup")
router.register(r"scans", ScanViewSet, basename="scan")
+router.register(
+    r"attack-paths-scans", AttackPathsScanViewSet, basename="attack-paths-scans"
+)
router.register(r"tasks", TaskViewSet, basename="task")
router.register(r"resources", ResourceViewSet, basename="resource")
router.register(r"findings", FindingViewSet, basename="finding")
@@ -1,6 +1,7 @@
import warnings

from celery import Celery, Task

from config.env import env

# Suppress specific warnings from django-rest-auth: https://github.com/iMerica/dj-rest-auth/issues/684

@@ -36,6 +36,12 @@ DATABASES = {
        "HOST": env("POSTGRES_REPLICA_HOST", default=default_db_host),
        "PORT": env("POSTGRES_REPLICA_PORT", default=default_db_port),
    },
+    "neo4j": {
+        "HOST": env.str("NEO4J_HOST", "neo4j"),
+        "PORT": env.str("NEO4J_PORT", "7687"),
+        "USER": env.str("NEO4J_USER", "neo4j"),
+        "PASSWORD": env.str("NEO4J_PASSWORD", "neo4j_password"),
+    },
}

DATABASES["default"] = DATABASES["prowler_user"]

@@ -37,6 +37,12 @@ DATABASES = {
        "HOST": env("POSTGRES_REPLICA_HOST", default=default_db_host),
        "PORT": env("POSTGRES_REPLICA_PORT", default=default_db_port),
    },
+    "neo4j": {
+        "HOST": env.str("NEO4J_HOST"),
+        "PORT": env.str("NEO4J_PORT"),
+        "USER": env.str("NEO4J_USER"),
+        "PASSWORD": env.str("NEO4J_PASSWORD"),
+    },
}

DATABASES["default"] = DATABASES["prowler_user"]
@@ -1,8 +1,11 @@
import logging
+from types import SimpleNamespace

from datetime import datetime, timedelta, timezone
from unittest.mock import MagicMock, patch

import pytest

from allauth.socialaccount.models import SocialLogin
from django.conf import settings
from django.db import connection as django_connection
@@ -11,10 +14,14 @@ from django.urls import reverse
from django_celery_results.models import TaskResult
from rest_framework import status
from rest_framework.test import APIClient
-from tasks.jobs.backfill import backfill_resource_scan_summaries

+from api.attack_paths import (
+    AttackPathsQueryDefinition,
+    AttackPathsQueryParameterDefinition,
+)
from api.db_utils import rls_transaction
from api.models import (
+    AttackPathsScan,
    ComplianceOverview,
    ComplianceRequirementOverview,
    Finding,
@@ -47,6 +54,7 @@ from api.rls import Tenant
from api.v1.serializers import TokenSerializer
from prowler.lib.check.models import Severity
from prowler.lib.outputs.finding import Status
+from tasks.jobs.backfill import backfill_resource_scan_summaries

TODAY = str(datetime.today().date())
API_JSON_CONTENT_TYPE = "application/vnd.api+json"
@@ -159,22 +167,20 @@ def create_test_user_rbac_no_roles(django_db_setup, django_db_blocker, tenants_f


@pytest.fixture(scope="function")
-def create_test_user_rbac_limited(django_db_setup, django_db_blocker):
+def create_test_user_rbac_limited(django_db_setup, django_db_blocker, tenants_fixture):
    with django_db_blocker.unblock():
        user = User.objects.create_user(
            name="testing_limited",
            email="rbac_limited@rbac.com",
            password=TEST_PASSWORD,
        )
-        tenant = Tenant.objects.create(
-            name="Tenant Test",
-        )
+        tenant = tenants_fixture[0]
        Membership.objects.create(
            user=user,
            tenant=tenant,
            role=Membership.RoleChoices.OWNER,
        )
-        Role.objects.create(
+        role = Role.objects.create(
            name="limited",
            tenant_id=tenant.id,
            manage_users=False,
@@ -187,7 +193,7 @@ def create_test_user_rbac_limited(django_db_setup, django_db_blocker):
        )
        UserRoleRelationship.objects.create(
            user=user,
-            role=Role.objects.get(name="limited"),
+            role=role,
            tenant_id=tenant.id,
        )
        return user
@@ -1108,8 +1114,8 @@ def scan_summaries_fixture(tenants_fixture, providers_fixture):
        region="region1",
        _pass=1,
        fail=0,
-        muted=0,
-        total=1,
+        muted=2,
+        total=3,
        new=1,
        changed=0,
        unchanged=0,
@@ -1117,7 +1123,7 @@ def scan_summaries_fixture(tenants_fixture, providers_fixture):
        fail_changed=0,
        pass_new=1,
        pass_changed=0,
-        muted_new=0,
+        muted_new=2,
        muted_changed=0,
        scan=scan,
    )
@@ -1130,8 +1136,8 @@ def scan_summaries_fixture(tenants_fixture, providers_fixture):
        region="region2",
        _pass=0,
        fail=1,
-        muted=1,
-        total=2,
+        muted=3,
+        total=4,
        new=2,
        changed=0,
        unchanged=0,
@@ -1139,7 +1145,7 @@ def scan_summaries_fixture(tenants_fixture, providers_fixture):
        fail_changed=0,
        pass_new=0,
        pass_changed=0,
-        muted_new=1,
+        muted_new=3,
        muted_changed=0,
        scan=scan,
    )
@@ -1152,8 +1158,8 @@ def scan_summaries_fixture(tenants_fixture, providers_fixture):
        region="region1",
        _pass=1,
        fail=0,
-        muted=0,
-        total=1,
+        muted=1,
+        total=2,
        new=1,
        changed=0,
        unchanged=0,
@@ -1161,7 +1167,7 @@ def scan_summaries_fixture(tenants_fixture, providers_fixture):
        fail_changed=0,
        pass_new=1,
        pass_changed=0,
-        muted_new=0,
+        muted_new=1,
        muted_changed=0,
        scan=scan,
    )
@@ -1469,6 +1475,104 @@ def mute_rules_fixture(tenants_fixture, create_test_user, findings_fixture):
    return mute_rule1, mute_rule2


@pytest.fixture
def create_attack_paths_scan():
    """Factory fixture to create Attack Paths scans for tests."""

    def _create(
        provider,
        *,
        scan=None,
        state=StateChoices.COMPLETED,
        progress=0,
        graph_database="tenant-db",
        **extra_fields,
    ):
        scan_instance = scan or Scan.objects.create(
            name=extra_fields.pop("scan_name", "Attack Paths Supporting Scan"),
            provider=provider,
            trigger=Scan.TriggerChoices.MANUAL,
            state=extra_fields.pop("scan_state", StateChoices.COMPLETED),
            tenant_id=provider.tenant_id,
        )

        payload = {
            "tenant_id": provider.tenant_id,
            "provider": provider,
            "scan": scan_instance,
            "state": state,
            "progress": progress,
            "graph_database": graph_database,
        }
        payload.update(extra_fields)

        return AttackPathsScan.objects.create(**payload)

    return _create
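A sketch of how a test might use this factory (fixture names other than the factory itself are assumed from the surrounding conftest):

```python
# Hypothetical test exercising the factory fixture above.
def test_attack_paths_scan_factory_defaults(create_attack_paths_scan, providers_fixture):
    provider = providers_fixture[0]
    aps = create_attack_paths_scan(provider, progress=50)

    assert aps.state == StateChoices.COMPLETED   # default state
    assert aps.progress == 50
    assert aps.scan.provider == provider         # supporting Scan created on demand
```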
@pytest.fixture
def attack_paths_query_definition_factory():
    """Factory fixture for building Attack Paths query definitions."""

    def _create(**overrides):
        cast_type = overrides.pop("cast_type", str)
        parameters = overrides.pop(
            "parameters",
            [
                AttackPathsQueryParameterDefinition(
                    name="limit",
                    label="Limit",
                    cast=cast_type,
                )
            ],
        )
        definition_payload = {
            "id": "aws-test",
            "name": "Attack Paths Test Query",
            "description": "Synthetic Attack Paths definition for tests.",
            "provider": "aws",
            "cypher": "RETURN 1",
            "parameters": parameters,
        }
        definition_payload.update(overrides)
        return AttackPathsQueryDefinition(**definition_payload)

    return _create


@pytest.fixture
def attack_paths_graph_stub_classes():
    """Provide lightweight graph element stubs for Attack Paths serialization tests."""

    class AttackPathsNativeValue:
        def __init__(self, value):
            self._value = value

        def to_native(self):
            return self._value

    class AttackPathsNode:
        def __init__(self, element_id, labels, properties):
            self.element_id = element_id
            self.labels = labels
            self._properties = properties

    class AttackPathsRelationship:
        def __init__(self, element_id, rel_type, start_node, end_node, properties):
            self.element_id = element_id
            self.type = rel_type
            self.start_node = start_node
            self.end_node = end_node
            self._properties = properties

    return SimpleNamespace(
        NativeValue=AttackPathsNativeValue,
        Node=AttackPathsNode,
        Relationship=AttackPathsRelationship,
    )


def get_authorization_header(access_token: str) -> dict:
    return {"Authorization": f"Bearer {access_token}"}
@@ -7,6 +7,7 @@ from tasks.tasks import perform_scheduled_scan_task
from api.db_utils import rls_transaction
from api.exceptions import ConflictException
from api.models import Provider, Scan, StateChoices
+from tasks.jobs.attack_paths import db_utils as attack_paths_db_utils


def schedule_provider_scan(provider_instance: Provider):
@@ -39,6 +40,12 @@ def schedule_provider_scan(provider_instance: Provider):
        scheduled_at=datetime.now(timezone.utc),
    )

+    attack_paths_db_utils.create_attack_paths_scan(
+        tenant_id=tenant_id,
+        scan_id=str(scheduled_scan.id),
+        provider_id=provider_id,
+    )
+
    # Schedule the task
    periodic_task_instance = PeriodicTask.objects.create(
        interval=schedule,
@@ -0,0 +1,5 @@
from tasks.jobs.attack_paths.scan import run as attack_paths_scan

__all__ = [
    "attack_paths_scan",
]
@@ -0,0 +1,237 @@
# Portions of this file are based on code from the Cartography project
# (https://github.com/cartography-cncf/cartography), which is licensed under the Apache 2.0 License.

from typing import Any

import aioboto3
import boto3
import neo4j

from cartography.config import Config as CartographyConfig
from cartography.intel import aws as cartography_aws
from celery.utils.log import get_task_logger

from api.models import (
    AttackPathsScan as ProwlerAPIAttackPathsScan,
    Provider as ProwlerAPIProvider,
)
from prowler.providers.common.provider import Provider as ProwlerSDKProvider
from tasks.jobs.attack_paths import db_utils, utils

logger = get_task_logger(__name__)


def start_aws_ingestion(
    neo4j_session: neo4j.Session,
    cartography_config: CartographyConfig,
    prowler_api_provider: ProwlerAPIProvider,
    prowler_sdk_provider: ProwlerSDKProvider,
    attack_paths_scan: ProwlerAPIAttackPathsScan,
) -> dict[str, dict[str, str]]:
    """
    Code based on Cartography version 0.122.0, specifically on `cartography.intel.aws.__init__.py`.

    For the scan progress updates:
    - The caller of this function (`tasks.jobs.attack_paths.scan.run`) has set it to 2.
    - When control returns to the caller, it will be set to 95.
    """

    # Initialize variables common to all jobs
    common_job_parameters = {
        "UPDATE_TAG": cartography_config.update_tag,
        "permission_relationships_file": cartography_config.permission_relationships_file,
        "aws_guardduty_severity_threshold": cartography_config.aws_guardduty_severity_threshold,
        "aws_cloudtrail_management_events_lookback_hours": cartography_config.aws_cloudtrail_management_events_lookback_hours,
        "experimental_aws_inspector_batch": cartography_config.experimental_aws_inspector_batch,
    }

    boto3_session = get_boto3_session(prowler_api_provider, prowler_sdk_provider)
    regions: list[str] = list(prowler_sdk_provider._enabled_regions)
    requested_syncs = list(cartography_aws.RESOURCE_FUNCTIONS.keys())

    sync_args = cartography_aws._build_aws_sync_kwargs(
        neo4j_session,
        boto3_session,
        regions,
        prowler_api_provider.uid,
        cartography_config.update_tag,
        common_job_parameters,
    )

    # Start with the organizations sync
    cartography_aws.organizations.sync(
        neo4j_session,
        {prowler_api_provider.alias: prowler_api_provider.uid},
        cartography_config.update_tag,
        common_job_parameters,
    )
    db_utils.update_attack_paths_scan_progress(attack_paths_scan, 3)

    # Add an extra field
    common_job_parameters["AWS_ID"] = prowler_api_provider.uid

    cartography_aws._autodiscover_accounts(
        neo4j_session,
        boto3_session,
        prowler_api_provider.uid,
        cartography_config.update_tag,
        common_job_parameters,
    )
    db_utils.update_attack_paths_scan_progress(attack_paths_scan, 4)

    failed_syncs = sync_aws_account(
        prowler_api_provider, requested_syncs, sync_args, attack_paths_scan
    )

    if "permission_relationships" in requested_syncs:
        cartography_aws.RESOURCE_FUNCTIONS["permission_relationships"](**sync_args)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 88)

    if "resourcegroupstaggingapi" in requested_syncs:
        cartography_aws.RESOURCE_FUNCTIONS["resourcegroupstaggingapi"](**sync_args)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 89)

    cartography_aws.run_scoped_analysis_job(
        "aws_ec2_iaminstanceprofile.json",
        neo4j_session,
        common_job_parameters,
    )
    db_utils.update_attack_paths_scan_progress(attack_paths_scan, 90)

    cartography_aws.run_analysis_job(
        "aws_lambda_ecr.json",
        neo4j_session,
        common_job_parameters,
    )
    db_utils.update_attack_paths_scan_progress(attack_paths_scan, 91)

    cartography_aws.merge_module_sync_metadata(
        neo4j_session,
        group_type="AWSAccount",
        group_id=prowler_api_provider.uid,
        synced_type="AWSAccount",
        update_tag=cartography_config.update_tag,
        stat_handler=cartography_aws.stat_handler,
    )
    db_utils.update_attack_paths_scan_progress(attack_paths_scan, 92)

    # Remove the extra field added above
    del common_job_parameters["AWS_ID"]

    cartography_aws.run_cleanup_job(
        "aws_post_ingestion_principals_cleanup.json",
        neo4j_session,
        common_job_parameters,
    )
    db_utils.update_attack_paths_scan_progress(attack_paths_scan, 93)

    cartography_aws._perform_aws_analysis(
        requested_syncs, neo4j_session, common_job_parameters
    )
    db_utils.update_attack_paths_scan_progress(attack_paths_scan, 94)

    return failed_syncs


def get_boto3_session(
    prowler_api_provider: ProwlerAPIProvider, prowler_sdk_provider: ProwlerSDKProvider
) -> boto3.Session:
    boto3_session = prowler_sdk_provider.session.current_session

    aws_accounts_from_session = cartography_aws.organizations.get_aws_account_default(
        boto3_session
    )
    if not aws_accounts_from_session:
        raise Exception(
            "No valid AWS credentials could be found. No AWS accounts can be synced."
        )

    aws_account_id_from_session = list(aws_accounts_from_session.values())[0]
    if prowler_api_provider.uid != aws_account_id_from_session:
        raise Exception(
            f"Provider {prowler_api_provider.uid} doesn't match AWS account {aws_account_id_from_session}."
        )

    if boto3_session.region_name is None:
        global_region = prowler_sdk_provider.get_global_region()
        boto3_session._session.set_config_variable("region", global_region)

    return boto3_session


def get_aioboto3_session(boto3_session: boto3.Session) -> aioboto3.Session:
    return aioboto3.Session(botocore_session=boto3_session._session)


def sync_aws_account(
    prowler_api_provider: ProwlerAPIProvider,
    requested_syncs: list[str],
    sync_args: dict[str, Any],
    attack_paths_scan: ProwlerAPIAttackPathsScan,
) -> dict[str, str]:
    current_progress = 4  # `cartography_aws._autodiscover_accounts`
    max_progress = (
        87  # `cartography_aws.RESOURCE_FUNCTIONS["permission_relationships"]` - 1
    )
    n_steps = (
        len(requested_syncs) - 2
    )  # Excluding `permission_relationships` and `resourcegroupstaggingapi`
    progress_step = (max_progress - current_progress) / n_steps

    failed_syncs = {}

    for func_name in requested_syncs:
        if func_name in cartography_aws.RESOURCE_FUNCTIONS:
            logger.info(
                f"Syncing function {func_name} for AWS account {prowler_api_provider.uid}"
            )

            # Update progress; not really the right place, but good enough
            current_progress += progress_step
            db_utils.update_attack_paths_scan_progress(
                attack_paths_scan, int(current_progress)
            )

            try:
                # `ecr:image_layers` uses `aioboto3_session` instead of `boto3_session`
                if func_name == "ecr:image_layers":
                    cartography_aws.RESOURCE_FUNCTIONS[func_name](
                        neo4j_session=sync_args.get("neo4j_session"),
                        aioboto3_session=get_aioboto3_session(
                            sync_args.get("boto3_session")
                        ),
                        regions=sync_args.get("regions"),
                        current_aws_account_id=sync_args.get("current_aws_account_id"),
                        update_tag=sync_args.get("update_tag"),
                        common_job_parameters=sync_args.get("common_job_parameters"),
                    )

                # Skip permission relationships and tags for now because they rely on data already being in the graph
                elif func_name in [
                    "permission_relationships",
                    "resourcegroupstaggingapi",
                ]:
                    continue

                else:
                    cartography_aws.RESOURCE_FUNCTIONS[func_name](**sync_args)

            except Exception as e:
                exception_message = utils.stringify_exception(
                    e, f"Exception for AWS sync function: {func_name}"
                )
                failed_syncs[func_name] = exception_message

                logger.warning(
                    f"Caught exception syncing function {func_name} from AWS account {prowler_api_provider.uid}. We "
                    "are continuing on to the next AWS sync function.",
                )

                continue

        else:
            raise ValueError(
                f'AWS sync function "{func_name}" was specified but does not exist. Did you misspell it?'
            )

    return failed_syncs
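The progress arithmetic spreads the 4-to-87 range evenly over every requested sync except the two deferred ones. A worked example (the sync count is illustrative, not taken from Cartography):

```python
# Worked example of the per-sync progress step computed in sync_aws_account.
current_progress, max_progress = 4, 87
n_requested = 80               # illustrative size of RESOURCE_FUNCTIONS
n_steps = n_requested - 2      # permission_relationships / resourcegroupstaggingapi deferred
progress_step = (max_progress - current_progress) / n_steps

print(round(progress_step, 2))  # ~1.06: each sync advances progress by about one point
```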
@@ -0,0 +1,158 @@
from datetime import datetime, timezone
from typing import Any
from uuid import UUID

from cartography.config import Config as CartographyConfig

from api.db_utils import rls_transaction
from api.models import (
    AttackPathsScan as ProwlerAPIAttackPathsScan,
    Provider as ProwlerAPIProvider,
    StateChoices,
)
from tasks.jobs.attack_paths.providers import is_provider_available


def create_attack_paths_scan(
    tenant_id: str,
    scan_id: str,
    provider_id: int,
) -> ProwlerAPIAttackPathsScan | None:
    with rls_transaction(tenant_id):
        prowler_api_provider = ProwlerAPIProvider.objects.get(id=provider_id)

    if not is_provider_available(prowler_api_provider.provider):
        return None

    with rls_transaction(tenant_id):
        attack_paths_scan = ProwlerAPIAttackPathsScan.objects.create(
            tenant_id=tenant_id,
            provider_id=provider_id,
            scan_id=scan_id,
            state=StateChoices.SCHEDULED,
            started_at=datetime.now(tz=timezone.utc),
        )
        attack_paths_scan.save()

    return attack_paths_scan


def retrieve_attack_paths_scan(
    tenant_id: str,
    scan_id: str,
) -> ProwlerAPIAttackPathsScan | None:
    try:
        with rls_transaction(tenant_id):
            attack_paths_scan = ProwlerAPIAttackPathsScan.objects.get(
                scan_id=scan_id,
            )

        return attack_paths_scan

    except ProwlerAPIAttackPathsScan.DoesNotExist:
        return None


def starting_attack_paths_scan(
    attack_paths_scan: ProwlerAPIAttackPathsScan,
    task_id: str,
    cartography_config: CartographyConfig,
) -> None:
    with rls_transaction(attack_paths_scan.tenant_id):
        attack_paths_scan.task_id = task_id
        attack_paths_scan.state = StateChoices.EXECUTING
        attack_paths_scan.started_at = datetime.now(tz=timezone.utc)
        attack_paths_scan.update_tag = cartography_config.update_tag
        attack_paths_scan.graph_database = cartography_config.neo4j_database

        attack_paths_scan.save(
            update_fields=[
                "task_id",
                "state",
                "started_at",
                "update_tag",
                "graph_database",
            ]
        )


def finish_attack_paths_scan(
    attack_paths_scan: ProwlerAPIAttackPathsScan,
    state: StateChoices,
    ingestion_exceptions: dict[str, Any],
) -> None:
    with rls_transaction(attack_paths_scan.tenant_id):
        now = datetime.now(tz=timezone.utc)
        duration = int((now - attack_paths_scan.started_at).total_seconds())

        attack_paths_scan.state = state
        attack_paths_scan.progress = 100
        attack_paths_scan.completed_at = now
        attack_paths_scan.duration = duration
        attack_paths_scan.ingestion_exceptions = ingestion_exceptions

        attack_paths_scan.save(
            update_fields=[
                "state",
                "progress",
                "completed_at",
                "duration",
                "ingestion_exceptions",
            ]
        )


def update_attack_paths_scan_progress(
    attack_paths_scan: ProwlerAPIAttackPathsScan,
    progress: int,
) -> None:
    with rls_transaction(attack_paths_scan.tenant_id):
        attack_paths_scan.progress = progress
        attack_paths_scan.save(update_fields=["progress"])


def get_old_attack_paths_scans(
    tenant_id: str,
    provider_id: str,
    attack_paths_scan_id: str,
) -> list[ProwlerAPIAttackPathsScan]:
    """
    An `old_attack_paths_scan` is any `completed` Attack Paths scan for the same provider,
    with its graph database not deleted, excluding the current Attack Paths scan.
    """

    with rls_transaction(tenant_id):
        completed_scans_qs = (
            ProwlerAPIAttackPathsScan.objects.filter(
                provider_id=provider_id,
                state=StateChoices.COMPLETED,
                is_graph_database_deleted=False,
            )
            .exclude(id=attack_paths_scan_id)
            .all()
        )

        return list(completed_scans_qs)


def update_old_attack_paths_scan(
    old_attack_paths_scan: ProwlerAPIAttackPathsScan,
) -> None:
    with rls_transaction(old_attack_paths_scan.tenant_id):
        old_attack_paths_scan.is_graph_database_deleted = True
        old_attack_paths_scan.save(update_fields=["is_graph_database_deleted"])


def get_provider_graph_database_names(tenant_id: str, provider_id: str) -> list[str]:
    """
    Return existing graph database names for a tenant/provider.

    Note: For accessing the `AttackPathsScan` we need to use the `all_objects` manager because the provider is soft-deleted.
    """
    with rls_transaction(tenant_id):
        graph_databases_names_qs = ProwlerAPIAttackPathsScan.all_objects.filter(
            provider_id=provider_id,
            is_graph_database_deleted=False,
        ).values_list("graph_database", flat=True)

        return list(graph_databases_names_qs)
@@ -0,0 +1,23 @@
AVAILABLE_PROVIDERS: list[str] = [
    "aws",
]

ROOT_NODE_LABELS: dict[str, str] = {
    "aws": "AWSAccount",
}

NODE_UID_FIELDS: dict[str, str] = {
    "aws": "arn",
}


def is_provider_available(provider_type: str) -> bool:
    return provider_type in AVAILABLE_PROVIDERS


def get_root_node_label(provider_type: str) -> str:
    return ROOT_NODE_LABELS.get(provider_type, "UnknownProviderAccount")


def get_node_uid_field(provider_type: str) -> str:
    return NODE_UID_FIELDS.get(provider_type, "UnknownProviderUID")
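These helpers behave as straightforward lookups with defensive defaults; for example:

```python
# Expected behaviour of the provider-mapping helpers above.
assert is_provider_available("aws") is True
assert is_provider_available("azure") is False                 # not yet in AVAILABLE_PROVIDERS

assert get_root_node_label("aws") == "AWSAccount"
assert get_root_node_label("gcp") == "UnknownProviderAccount"  # fallback label
assert get_node_uid_field("aws") == "arn"
```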
@@ -0,0 +1,205 @@
import neo4j

from cartography.client.core.tx import run_write_query
from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger

from api.db_utils import rls_transaction
from api.models import Provider, ResourceFindingMapping
from config.env import env
from prowler.config import config as ProwlerConfig
from tasks.jobs.attack_paths.providers import get_node_uid_field, get_root_node_label

logger = get_task_logger(__name__)

BATCH_SIZE = env.int("NEO4J_INSERT_BATCH_SIZE", 500)

INDEX_STATEMENTS = [
    "CREATE INDEX prowler_finding_id IF NOT EXISTS FOR (n:ProwlerFinding) ON (n.id);",
    "CREATE INDEX prowler_finding_provider_uid IF NOT EXISTS FOR (n:ProwlerFinding) ON (n.provider_uid);",
    "CREATE INDEX prowler_finding_lastupdated IF NOT EXISTS FOR (n:ProwlerFinding) ON (n.lastupdated);",
    "CREATE INDEX prowler_finding_status IF NOT EXISTS FOR (n:ProwlerFinding) ON (n.status);",
]
INSERT_STATEMENT_TEMPLATE = """
UNWIND $findings_data AS finding_data

MATCH (account:__ROOT_NODE_LABEL__ {id: $provider_uid})
MATCH (account)-->(resource)
WHERE resource.__NODE_UID_FIELD__ = finding_data.resource_uid
    OR resource.id = finding_data.resource_uid

MERGE (finding:ProwlerFinding {id: finding_data.id})
ON CREATE SET
    finding.id = finding_data.id,
    finding.uid = finding_data.uid,
    finding.inserted_at = finding_data.inserted_at,
    finding.updated_at = finding_data.updated_at,
    finding.first_seen_at = finding_data.first_seen_at,
    finding.scan_id = finding_data.scan_id,
    finding.delta = finding_data.delta,
    finding.status = finding_data.status,
    finding.status_extended = finding_data.status_extended,
    finding.severity = finding_data.severity,
    finding.check_id = finding_data.check_id,
    finding.check_title = finding_data.check_title,
    finding.muted = finding_data.muted,
    finding.muted_reason = finding_data.muted_reason,
    finding.provider_uid = $provider_uid,
    finding.firstseen = timestamp(),
    finding.lastupdated = $last_updated,
    finding._module_name = 'cartography:prowler',
    finding._module_version = $prowler_version
ON MATCH SET
    finding.status = finding_data.status,
    finding.status_extended = finding_data.status_extended,
    finding.lastupdated = $last_updated

MERGE (resource)-[rel:HAS_FINDING]->(finding)
ON CREATE SET
    rel.provider_uid = $provider_uid,
    rel.firstseen = timestamp(),
    rel.lastupdated = $last_updated,
    rel._module_name = 'cartography:prowler',
    rel._module_version = $prowler_version
ON MATCH SET
    rel.lastupdated = $last_updated
"""

CLEANUP_STATEMENT = """
MATCH (finding:ProwlerFinding {provider_uid: $provider_uid})
WHERE finding.lastupdated < $last_updated

WITH finding LIMIT $batch_size

DETACH DELETE finding

RETURN COUNT(finding) AS deleted_findings_count
"""


def create_indexes(neo4j_session: neo4j.Session) -> None:
    """
    Code based on Cartography version 0.122.0, specifically on `cartography.intel.create_indexes.run`.
    """

    logger.info("Creating indexes for Prowler node types.")
    for statement in INDEX_STATEMENTS:
        logger.debug("Executing statement: %s", statement)
        run_write_query(neo4j_session, statement)


def analysis(
    neo4j_session: neo4j.Session,
    prowler_api_provider: Provider,
    scan_id: str,
    config: CartographyConfig,
) -> None:
    findings_data = get_provider_last_scan_findings(prowler_api_provider, scan_id)
    load_findings(neo4j_session, findings_data, prowler_api_provider, config)
    cleanup_findings(neo4j_session, prowler_api_provider, config)


def get_provider_last_scan_findings(
    prowler_api_provider: Provider,
    scan_id: str,
) -> list[dict[str, str]]:
    with rls_transaction(prowler_api_provider.tenant_id):
        resource_finding_qs = ResourceFindingMapping.objects.filter(
            finding__scan_id=scan_id,
        ).values(
            "resource__uid",
            "finding__id",
            "finding__uid",
            "finding__inserted_at",
            "finding__updated_at",
            "finding__first_seen_at",
            "finding__scan_id",
            "finding__delta",
            "finding__status",
            "finding__status_extended",
            "finding__severity",
            "finding__check_id",
            "finding__check_metadata__checktitle",
            "finding__muted",
            "finding__muted_reason",
        )

        findings = []
        for resource_finding in resource_finding_qs:
            findings.append(
                {
                    "resource_uid": str(resource_finding["resource__uid"]),
                    "id": str(resource_finding["finding__id"]),
                    "uid": resource_finding["finding__uid"],
                    "inserted_at": resource_finding["finding__inserted_at"],
                    "updated_at": resource_finding["finding__updated_at"],
                    "first_seen_at": resource_finding["finding__first_seen_at"],
                    "scan_id": str(resource_finding["finding__scan_id"]),
                    "delta": resource_finding["finding__delta"],
                    "status": resource_finding["finding__status"],
                    "status_extended": resource_finding["finding__status_extended"],
                    "severity": resource_finding["finding__severity"],
                    "check_id": str(resource_finding["finding__check_id"]),
                    "check_title": resource_finding[
                        "finding__check_metadata__checktitle"
                    ],
                    "muted": resource_finding["finding__muted"],
                    "muted_reason": resource_finding["finding__muted_reason"],
                }
            )

    return findings


def load_findings(
    neo4j_session: neo4j.Session,
    findings_data: list[dict[str, str]],
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
    replacements = {
        "__ROOT_NODE_LABEL__": get_root_node_label(prowler_api_provider.provider),
        "__NODE_UID_FIELD__": get_node_uid_field(prowler_api_provider.provider),
    }
    query = INSERT_STATEMENT_TEMPLATE
    for replace_key, replace_value in replacements.items():
        query = query.replace(replace_key, replace_value)

    parameters = {
        "provider_uid": str(prowler_api_provider.uid),
        "last_updated": config.update_tag,
        "prowler_version": ProwlerConfig.prowler_version,
    }

    total_length = len(findings_data)
    for i in range(0, total_length, BATCH_SIZE):
        parameters["findings_data"] = findings_data[i : i + BATCH_SIZE]

        logger.info(
            f"Loading findings batch {i // BATCH_SIZE + 1} / {(total_length + BATCH_SIZE - 1) // BATCH_SIZE}"
        )

        neo4j_session.run(query, parameters)
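Because Cypher labels and property names cannot be bound as query parameters, `load_findings` splices them in with plain string replacement before the batched run. In isolation:

```python
# Standalone illustration of the placeholder substitution in load_findings.
template = "MATCH (account:__ROOT_NODE_LABEL__)-->(r) WHERE r.__NODE_UID_FIELD__ = $uid"
replacements = {"__ROOT_NODE_LABEL__": "AWSAccount", "__NODE_UID_FIELD__": "arn"}

query = template
for key, value in replacements.items():
    query = query.replace(key, value)

print(query)  # MATCH (account:AWSAccount)-->(r) WHERE r.arn = $uid
```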
def cleanup_findings(
    neo4j_session: neo4j.Session,
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
    parameters = {
        "provider_uid": str(prowler_api_provider.uid),
        "last_updated": config.update_tag,
        "batch_size": BATCH_SIZE,
    }

    batch = 1
    deleted_count = 1
    while deleted_count > 0:
        logger.info(f"Cleaning findings batch {batch}")

        result = neo4j_session.run(CLEANUP_STATEMENT, parameters)

        deleted_count = result.single().get("deleted_findings_count", 0)
        batch += 1
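`cleanup_findings` relies on the `LIMIT` inside `CLEANUP_STATEMENT` to delete stale findings in fixed-size batches, looping until a pass deletes nothing. The control flow, reduced to its essentials:

```python
# Minimal sketch of the batch-until-empty deletion loop used above.
def batched_cleanup(run_batch, batch_size: int = 500) -> int:
    total = 0
    deleted = batch_size  # prime the loop
    while deleted > 0:
        deleted = run_batch(batch_size)  # returns rows deleted this pass
        total += deleted
    return total

assert batched_cleanup(lambda n: 0) == 0  # no stale findings: single no-op pass
```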
@@ -0,0 +1,183 @@
import asyncio
import logging
import time

from typing import Any, Callable

from cartography.config import Config as CartographyConfig
from cartography.intel import analysis as cartography_analysis
from cartography.intel import create_indexes as cartography_create_indexes
from cartography.intel import ontology as cartography_ontology
from celery.utils.log import get_task_logger

from api.attack_paths import database as graph_database
from api.db_utils import rls_transaction
from api.models import (
    Provider as ProwlerAPIProvider,
    StateChoices,
)
from api.utils import initialize_prowler_provider
from tasks.jobs.attack_paths import aws, db_utils, prowler, utils

# Without this, Celery goes crazy with Cartography logging
logging.getLogger("cartography").setLevel(logging.ERROR)
logging.getLogger("neo4j").propagate = False

logger = get_task_logger(__name__)

CARTOGRAPHY_INGESTION_FUNCTIONS: dict[str, Callable] = {
    "aws": aws.start_aws_ingestion,
}


def get_cartography_ingestion_function(provider_type: str) -> Callable | None:
    return CARTOGRAPHY_INGESTION_FUNCTIONS.get(provider_type)


def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
    """
    Code based on Cartography version 0.122.0, specifically on `cartography.cli.main`, `cartography.cli.CLI.main`,
    `cartography.sync.run_with_config` and `cartography.sync.Sync.run`.
    """
    ingestion_exceptions = {}  # This will hold any exceptions raised during ingestion

    # Required Prowler objects
    with rls_transaction(tenant_id):
        prowler_api_provider = ProwlerAPIProvider.objects.get(scan__pk=scan_id)
        prowler_sdk_provider = initialize_prowler_provider(prowler_api_provider)

    # Required Attack Paths scan objects
    cartography_ingestion_function = get_cartography_ingestion_function(
        prowler_api_provider.provider
    )
    attack_paths_scan = db_utils.retrieve_attack_paths_scan(tenant_id, scan_id)

    # Checks before starting the scan
    if not cartography_ingestion_function:
        ingestion_exceptions = {
            "global_error": f"Provider {prowler_api_provider.provider} is not supported for Attack Paths scans"
        }
        if attack_paths_scan:
            db_utils.finish_attack_paths_scan(
                attack_paths_scan, StateChoices.COMPLETED, ingestion_exceptions
            )

        logger.warning(
            f"Provider {prowler_api_provider.provider} is not supported for Attack Paths scans"
        )
        return ingestion_exceptions

    else:
        if not attack_paths_scan:
            logger.warning(
                f"No Attack Paths scan found for scan {scan_id} and tenant {tenant_id}; creating it now"
            )
            attack_paths_scan = db_utils.create_attack_paths_scan(
                tenant_id, scan_id, prowler_api_provider.id
            )

    # When creating the Cartography configuration, the `neo4j_user` and `neo4j_password` attributes are not needed in this config object
    cartography_config = CartographyConfig(
        neo4j_uri=graph_database.get_uri(),
        neo4j_database=graph_database.get_database_name(attack_paths_scan.id),
        update_tag=int(time.time()),
    )

    # Starting the Attack Paths scan
    db_utils.starting_attack_paths_scan(attack_paths_scan, task_id, cartography_config)

    try:
        logger.info(
            f"Creating Neo4j database {cartography_config.neo4j_database} for tenant {prowler_api_provider.tenant_id}"
        )

        graph_database.create_database(cartography_config.neo4j_database)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 1)

        logger.info(
            f"Starting Cartography ({attack_paths_scan.id}) for "
            f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
        )
        with graph_database.get_session(
            cartography_config.neo4j_database
        ) as neo4j_session:
            # Index creation
            cartography_create_indexes.run(neo4j_session, cartography_config)
            prowler.create_indexes(neo4j_session)
            db_utils.update_attack_paths_scan_progress(attack_paths_scan, 2)

            # The real scan, which iterates over the cloud services
            ingestion_exceptions = _call_within_event_loop(
                cartography_ingestion_function,
                neo4j_session,
                cartography_config,
                prowler_api_provider,
                prowler_sdk_provider,
                attack_paths_scan,
            )

            # Post-processing: kept just to be more Cartography-compliant
            cartography_ontology.run(neo4j_session, cartography_config)
            db_utils.update_attack_paths_scan_progress(attack_paths_scan, 95)

            cartography_analysis.run(neo4j_session, cartography_config)
            db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)

            # Adding Prowler nodes and relationships
            prowler.analysis(
                neo4j_session, prowler_api_provider, scan_id, cartography_config
            )

        logger.info(
            f"Completed Cartography ({attack_paths_scan.id}) for "
            f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
        )

        # Handling database changes
        old_attack_paths_scans = db_utils.get_old_attack_paths_scans(
            prowler_api_provider.tenant_id,
            prowler_api_provider.id,
            attack_paths_scan.id,
        )
        for old_attack_paths_scan in old_attack_paths_scans:
            graph_database.drop_database(old_attack_paths_scan.graph_database)
            db_utils.update_old_attack_paths_scan(old_attack_paths_scan)

        db_utils.finish_attack_paths_scan(
            attack_paths_scan, StateChoices.COMPLETED, ingestion_exceptions
        )
        return ingestion_exceptions

    except Exception as e:
        exception_message = utils.stringify_exception(e, "Cartography failed")
        logger.error(exception_message)
        ingestion_exceptions["global_cartography_error"] = exception_message

        # Handling database changes
        graph_database.drop_database(cartography_config.neo4j_database)
        db_utils.finish_attack_paths_scan(
            attack_paths_scan, StateChoices.FAILED, ingestion_exceptions
        )
        raise


def _call_within_event_loop(fn, *args, **kwargs):
    """
    Cartography needs a running event loop. Assuming there is none (Celery task or even a regular DRF endpoint),
    create a new one and set it as the current event loop for this thread.
    """
    loop = asyncio.new_event_loop()
    try:
        asyncio.set_event_loop(loop)
        return fn(*args, **kwargs)

    finally:
        try:
            loop.run_until_complete(loop.shutdown_asyncgens())

        except Exception:
            pass

        loop.close()
        asyncio.set_event_loop(None)
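A usage sketch: the wrapped callable can rely on a current event loop even though the worker thread starts without one (the probe coroutine is purely illustrative):

```python
# Hypothetical caller demonstrating _call_within_event_loop.
import asyncio

def sync_entrypoint() -> str:
    async def probe() -> str:
        return "ok"
    # Inside the wrapper a fresh loop is current, so this succeeds.
    return asyncio.get_event_loop().run_until_complete(probe())

print(_call_within_event_loop(sync_entrypoint))  # "ok"
```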
@@ -0,0 +1,10 @@
import traceback

from datetime import datetime, timezone


def stringify_exception(exception: Exception, context: str) -> str:
    timestamp = datetime.now(tz=timezone.utc)
    exception_traceback = traceback.TracebackException.from_exception(exception)
    traceback_string = "".join(exception_traceback.format())
    return f"{timestamp} - {context}\n{traceback_string}"
@@ -1,5 +1,10 @@
from collections import defaultdict

+from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import (
+    ComplianceOverviewSummary,
    ComplianceRequirementOverview,
    Resource,
    ResourceFindingMapping,
    ResourceScanSummary,
@@ -9,7 +14,7 @@ from api.models import (


def backfill_resource_scan_summaries(tenant_id: str, scan_id: str):
-    with rls_transaction(tenant_id):
+    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        if ResourceScanSummary.objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        ).exists():
@@ -59,3 +64,114 @@ def backfill_resource_scan_summaries(tenant_id: str, scan_id: str):
        )

    return {"status": "backfilled", "inserted": len(summaries)}
def backfill_compliance_summaries(tenant_id: str, scan_id: str):
    """
    Backfill ComplianceOverviewSummary records for a completed scan.

    This function checks if summary records already exist for the scan.
    If not, it aggregates compliance requirement data and creates the summaries.

    Args:
        tenant_id: Target tenant UUID
        scan_id: Scan UUID to backfill

    Returns:
        dict: Status indicating whether backfill was performed
    """
    with rls_transaction(tenant_id):
        if ComplianceOverviewSummary.objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        ).exists():
            return {"status": "already backfilled"}

    with rls_transaction(tenant_id):
        if not Scan.objects.filter(
            tenant_id=tenant_id,
            id=scan_id,
            state__in=(StateChoices.COMPLETED, StateChoices.FAILED),
        ).exists():
            return {"status": "scan is not completed"}

        # Fetch all compliance requirement overview rows for this scan
        requirement_rows = ComplianceRequirementOverview.objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        ).values(
            "compliance_id",
            "requirement_id",
            "requirement_status",
        )

        if not requirement_rows:
            return {"status": "no compliance data to backfill"}

        # Group by (compliance_id, requirement_id) across regions
        requirement_statuses = defaultdict(
            lambda: {"fail_count": 0, "pass_count": 0, "total_count": 0}
        )

        for row in requirement_rows:
            compliance_id = row["compliance_id"]
            requirement_id = row["requirement_id"]
            requirement_status = row["requirement_status"]

            # Aggregate requirement status across regions
            key = (compliance_id, requirement_id)
            requirement_statuses[key]["total_count"] += 1

            if requirement_status == "FAIL":
                requirement_statuses[key]["fail_count"] += 1
            elif requirement_status == "PASS":
                requirement_statuses[key]["pass_count"] += 1

        # Determine per-requirement status and aggregate to compliance level
        compliance_summaries = defaultdict(
            lambda: {
                "total_requirements": 0,
                "requirements_passed": 0,
                "requirements_failed": 0,
                "requirements_manual": 0,
            }
        )

        for (compliance_id, requirement_id), counts in requirement_statuses.items():
            # Apply business rule: any FAIL → requirement fails
            if counts["fail_count"] > 0:
                req_status = "FAIL"
            elif counts["pass_count"] == counts["total_count"]:
                req_status = "PASS"
            else:
                req_status = "MANUAL"

            # Aggregate to compliance level
            compliance_summaries[compliance_id]["total_requirements"] += 1
            if req_status == "PASS":
                compliance_summaries[compliance_id]["requirements_passed"] += 1
            elif req_status == "FAIL":
                compliance_summaries[compliance_id]["requirements_failed"] += 1
            else:
                compliance_summaries[compliance_id]["requirements_manual"] += 1

        # Create summary objects
        summary_objects = []
        for compliance_id, data in compliance_summaries.items():
            summary_objects.append(
                ComplianceOverviewSummary(
                    tenant_id=tenant_id,
                    scan_id=scan_id,
                    compliance_id=compliance_id,
                    requirements_passed=data["requirements_passed"],
                    requirements_failed=data["requirements_failed"],
                    requirements_manual=data["requirements_manual"],
                    total_requirements=data["total_requirements"],
                )
            )

        # Bulk insert summaries
        if summary_objects:
            ComplianceOverviewSummary.objects.bulk_create(
                summary_objects, batch_size=500, ignore_conflicts=True
            )

        return {"status": "backfilled", "inserted": len(summary_objects)}
@@ -1,9 +1,19 @@
from celery.utils.log import get_task_logger
from django.db import DatabaseError

+from api.attack_paths import database as graph_database
from api.db_router import MainRouter
from api.db_utils import batch_delete, rls_transaction
-from api.models import Finding, Provider, Resource, Scan, ScanSummary, Tenant
+from api.models import (
+    AttackPathsScan,
+    Finding,
+    Provider,
+    Resource,
+    Scan,
+    ScanSummary,
+    Tenant,
+)
+from tasks.jobs.attack_paths.db_utils import get_provider_graph_database_names

logger = get_task_logger(__name__)

@@ -23,16 +33,27 @@ def delete_provider(tenant_id: str, pk: str):
    Raises:
        Provider.DoesNotExist: If no instance with the provided primary key exists.
    """
+    # Delete the Attack Paths graph databases related to the provider
+    graph_database_names = get_provider_graph_database_names(tenant_id, pk)
+    try:
+        for graph_database_name in graph_database_names:
+            graph_database.drop_database(graph_database_name)
+    except graph_database.GraphDatabaseQueryException as gdb_error:
+        logger.error(f"Error deleting Provider databases: {gdb_error}")
+        raise
+
    # Get all provider-related data and delete it in batches
    with rls_transaction(tenant_id):
        instance = Provider.all_objects.get(pk=pk)
-        deletion_summary = {}
        deletion_steps = [
            ("Scan Summaries", ScanSummary.all_objects.filter(scan__provider=instance)),
            ("Findings", Finding.all_objects.filter(scan__provider=instance)),
            ("Resources", Resource.all_objects.filter(provider=instance)),
            ("Scans", Scan.all_objects.filter(provider=instance)),
+            ("AttackPathsScans", AttackPathsScan.all_objects.filter(provider=instance)),
        ]

+        deletion_summary = {}
        for step_name, queryset in deletion_steps:
            try:
                _, step_summary = batch_delete(tenant_id, queryset)
@@ -48,6 +69,7 @@ def delete_provider(tenant_id: str, pk: str):
    except DatabaseError as db_error:
        logger.error(f"Error deleting Provider: {db_error}")
        raise

    return deletion_summary
|
||||
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ from prowler.config.config import (
|
||||
html_file_suffix,
|
||||
json_asff_file_suffix,
|
||||
json_ocsf_file_suffix,
|
||||
set_output_timestamp,
|
||||
)
|
||||
from prowler.lib.outputs.asff.asff import ASFF
|
||||
from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected import (
|
||||
@@ -58,6 +59,9 @@ from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_azur
|
||||
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_gcp import (
|
||||
ProwlerThreatScoreGCP,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_kubernetes import (
|
||||
ProwlerThreatScoreKubernetes,
|
||||
)
|
||||
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_m365 import (
|
||||
ProwlerThreatScoreM365,
|
||||
)
|
||||
@@ -104,6 +108,10 @@ COMPLIANCE_CLASS_MAP = {
|
||||
"kubernetes": [
|
||||
(lambda name: name.startswith("cis_"), KubernetesCIS),
|
||||
(lambda name: name.startswith("iso27001_"), KubernetesISO27001),
|
||||
(
|
||||
lambda name: name == "prowler_threatscore_kubernetes",
|
||||
ProwlerThreatScoreKubernetes,
|
||||
),
|
||||
],
|
||||
"m365": [
|
||||
(lambda name: name.startswith("cis_"), M365CIS),
|
||||
@@ -234,36 +242,33 @@ def _upload_to_s3(
|
||||
logger.error(f"S3 upload failed: {str(e)}")
|
||||
|
||||
|
||||
def _generate_output_directory(
|
||||
output_directory, prowler_provider: object, tenant_id: str, scan_id: str
|
||||
) -> tuple[str, str, str]:
|
||||
def _build_output_path(
|
||||
output_directory: str,
|
||||
prowler_provider: str,
|
||||
tenant_id: str,
|
||||
scan_id: str,
|
||||
subdirectory: str = None,
|
||||
) -> str:
|
||||
"""
|
||||
Generate a file system path for the output directory of a prowler scan.
|
||||
|
||||
This function constructs the output directory path by combining a base
|
||||
temporary output directory, the tenant ID, the scan ID, and details about
|
||||
the prowler provider along with a timestamp. The resulting path is used to
|
||||
store the output files of a prowler scan.
|
||||
|
||||
Note:
|
||||
This function depends on one external variable:
|
||||
- `output_file_timestamp`: A timestamp (as a string) used to uniquely identify the output.
|
||||
Build a file system path for the output directory of a prowler scan.
|
||||
|
||||
Args:
|
||||
output_directory (str): The base output directory.
|
||||
prowler_provider (object): An identifier or descriptor for the prowler provider.
|
||||
Typically, this is a string indicating the provider (e.g., "aws").
|
||||
prowler_provider (str): An identifier or descriptor for the prowler provider.
|
||||
Typically, this is a string indicating the provider (e.g., "aws").
|
||||
tenant_id (str): The unique identifier for the tenant.
|
||||
scan_id (str): The unique identifier for the scan.
|
||||
subdirectory (str, optional): Optional subdirectory to include in the path
|
||||
(e.g., "compliance", "threatscore", "ens").
|
||||
|
||||
Returns:
|
||||
str: The constructed file system path for the prowler scan output directory.
|
||||
str: The constructed path with directory created.
|
||||
|
||||
Example:
|
||||
>>> _generate_output_directory("/tmp", "aws", "tenant-1234", "scan-5678")
|
||||
'/tmp/tenant-1234/aws/scan-5678/prowler-output-2023-02-15T12:34:56',
|
||||
'/tmp/tenant-1234/aws/scan-5678/compliance/prowler-output-2023-02-15T12:34:56'
|
||||
'/tmp/tenant-1234/aws/scan-5678/threatscore/prowler-output-2023-02-15T12:34:56'
|
||||
>>> _build_output_path("/tmp", "aws", "tenant-1234", "scan-5678")
|
||||
'/tmp/tenant-1234/scan-5678/prowler-output-aws-20230215123456'
|
||||
>>> _build_output_path("/tmp", "aws", "tenant-1234", "scan-5678", "threatscore")
|
||||
'/tmp/tenant-1234/scan-5678/threatscore/prowler-output-aws-20230215123456'
|
||||
"""
|
||||
# Sanitize the prowler provider name to ensure it is a valid directory name
|
||||
prowler_provider_sanitized = re.sub(r"[^\w\-]", "-", prowler_provider)
|
||||
@@ -271,23 +276,107 @@ def _generate_output_directory(
    with rls_transaction(tenant_id):
        started_at = Scan.objects.get(id=scan_id).started_at

    set_output_timestamp(started_at)

    timestamp = started_at.strftime("%Y%m%d%H%M%S")
    path = (
        f"{output_directory}/{tenant_id}/{scan_id}/prowler-output-"
        f"{prowler_provider_sanitized}-{timestamp}"
    )

    if subdirectory:
        path = (
            f"{output_directory}/{tenant_id}/{scan_id}/{subdirectory}/prowler-output-"
            f"{prowler_provider_sanitized}-{timestamp}"
        )
    else:
        path = (
            f"{output_directory}/{tenant_id}/{scan_id}/prowler-output-"
            f"{prowler_provider_sanitized}-{timestamp}"
        )

    # Create directory for the path if it doesn't exist
    os.makedirs("/".join(path.split("/")[:-1]), exist_ok=True)

    compliance_path = (
        f"{output_directory}/{tenant_id}/{scan_id}/compliance/prowler-output-"
        f"{prowler_provider_sanitized}-{timestamp}"
    )
    os.makedirs("/".join(compliance_path.split("/")[:-1]), exist_ok=True)
    return path

    threatscore_path = (
        f"{output_directory}/{tenant_id}/{scan_id}/threatscore/prowler-output-"
        f"{prowler_provider_sanitized}-{timestamp}"
    )
    os.makedirs("/".join(threatscore_path.split("/")[:-1]), exist_ok=True)

    return path, compliance_path, threatscore_path
def _generate_compliance_output_directory(
    output_directory: str,
    prowler_provider: str,
    tenant_id: str,
    scan_id: str,
    compliance_framework: str,
) -> str:
    """
    Generate a file system path for a compliance framework output directory.

    This function constructs the output directory path specifically for a compliance
    framework (e.g., "threatscore", "ens") by combining a base temporary output directory,
    the tenant ID, the scan ID, the compliance framework name, and details about the
    prowler provider along with a timestamp.

    Args:
        output_directory (str): The base output directory.
        prowler_provider (str): An identifier or descriptor for the prowler provider.
            Typically, this is a string indicating the provider (e.g., "aws").
        tenant_id (str): The unique identifier for the tenant.
        scan_id (str): The unique identifier for the scan.
        compliance_framework (str): The compliance framework name (e.g., "threatscore", "ens").

    Returns:
        str: The path for the compliance framework output directory.

    Example:
        >>> _generate_compliance_output_directory("/tmp", "aws", "tenant-1234", "scan-5678", "threatscore")
        '/tmp/tenant-1234/scan-5678/threatscore/prowler-output-aws-20230215123456'
        >>> _generate_compliance_output_directory("/tmp", "aws", "tenant-1234", "scan-5678", "ens")
        '/tmp/tenant-1234/scan-5678/ens/prowler-output-aws-20230215123456'
        >>> _generate_compliance_output_directory("/tmp", "aws", "tenant-1234", "scan-5678", "nis2")
        '/tmp/tenant-1234/scan-5678/nis2/prowler-output-aws-20230215123456'
    """
    return _build_output_path(
        output_directory,
        prowler_provider,
        tenant_id,
        scan_id,
        subdirectory=compliance_framework,
    )


def _generate_output_directory(
    output_directory: str,
    prowler_provider: str,
    tenant_id: str,
    scan_id: str,
) -> tuple[str, str]:
    """
    Generate file system paths for the standard and compliance output directories of a prowler scan.

    This function constructs both the standard output directory path and the compliance
    output directory path by combining a base temporary output directory, the tenant ID,
    the scan ID, and details about the prowler provider along with a timestamp.

    Args:
        output_directory (str): The base output directory.
        prowler_provider (str): An identifier or descriptor for the prowler provider.
            Typically, this is a string indicating the provider (e.g., "aws").
        tenant_id (str): The unique identifier for the tenant.
        scan_id (str): The unique identifier for the scan.

    Returns:
        tuple[str, str]: A tuple containing (standard_path, compliance_path).

    Example:
        >>> _generate_output_directory("/tmp", "aws", "tenant-1234", "scan-5678")
        ('/tmp/tenant-1234/scan-5678/prowler-output-aws-20230215123456',
         '/tmp/tenant-1234/scan-5678/compliance/prowler-output-aws-20230215123456')
    """
    standard_path = _build_output_path(
        output_directory, prowler_provider, tenant_id, scan_id
    )
    compliance_path = _build_output_path(
        output_directory,
        prowler_provider,
        tenant_id,
        scan_id,
        subdirectory="compliance",
    )

    return standard_path, compliance_path
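# Hedged usage sketch (added for illustration, not part of the diff): how the
# helpers above are typically combined when exporting one scan. The variable
# DJANGO_TMP_OUTPUT_DIRECTORY mirrors the setting used elsewhere in this diff.
#
#     out_dir, comp_dir = _generate_output_directory(
#         DJANGO_TMP_OUTPUT_DIRECTORY, "aws", tenant_id, scan_id
#     )
#     ens_dir = _generate_compliance_output_directory(
#         DJANGO_TMP_OUTPUT_DIRECTORY, "aws", tenant_id, scan_id, "ens"
#     )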

@@ -0,0 +1,214 @@
from celery.utils.log import get_task_logger
from tasks.jobs.threatscore_utils import (
    _aggregate_requirement_statistics_from_database,
    _calculate_requirements_data_from_statistics,
)

from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import Provider, StatusChoices
from prowler.lib.check.compliance_models import Compliance

logger = get_task_logger(__name__)


def compute_threatscore_metrics(
    tenant_id: str,
    scan_id: str,
    provider_id: str,
    compliance_id: str,
    min_risk_level: int = 4,
) -> dict:
    """
    Compute ThreatScore metrics for a given scan.

    This function calculates all the metrics needed for a ThreatScore snapshot:
    - Overall ThreatScore percentage
    - Section-by-section scores
    - Critical failed requirements (risk >= min_risk_level)
    - Summary statistics (requirements and findings counts)

    Args:
        tenant_id (str): The tenant ID for Row-Level Security context.
        scan_id (str): The ID of the scan to analyze.
        provider_id (str): The ID of the provider used in the scan.
        compliance_id (str): Compliance framework ID (e.g., "prowler_threatscore_aws").
        min_risk_level (int): Minimum risk level for critical requirements. Defaults to 4.

    Returns:
        dict: A dictionary containing:
            - overall_score (float): Overall ThreatScore percentage (0-100)
            - section_scores (dict): Section name -> score percentage mapping
            - critical_requirements (list): List of critical failed requirement dicts
            - total_requirements (int): Total number of requirements
            - passed_requirements (int): Number of PASS requirements
            - failed_requirements (int): Number of FAIL requirements
            - manual_requirements (int): Number of MANUAL requirements
            - total_findings (int): Total findings count
            - passed_findings (int): Passed findings count
            - failed_findings (int): Failed findings count

    Example:
        >>> metrics = compute_threatscore_metrics(
        ...     tenant_id="tenant-123",
        ...     scan_id="scan-456",
        ...     provider_id="provider-789",
        ...     compliance_id="prowler_threatscore_aws"
        ... )
        >>> print(f"Overall ThreatScore: {metrics['overall_score']:.2f}%")
    """
    # Get provider and compliance information
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        provider_obj = Provider.objects.get(id=provider_id)
        provider_type = provider_obj.provider

    frameworks_bulk = Compliance.get_bulk(provider_type)
    compliance_obj = frameworks_bulk[compliance_id]

    # Aggregate requirement statistics from database
    requirement_statistics_by_check_id = (
        _aggregate_requirement_statistics_from_database(tenant_id, scan_id)
    )

    # Calculate requirements data using aggregated statistics
    attributes_by_requirement_id, requirements_list = (
        _calculate_requirements_data_from_statistics(
            compliance_obj, requirement_statistics_by_check_id
        )
    )

    # Initialize metrics
    overall_numerator = 0
    overall_denominator = 0
    overall_has_findings = False

    sections_data = {}

    total_requirements = len(requirements_list)
    passed_requirements = 0
    failed_requirements = 0
    manual_requirements = 0
    total_findings = 0
    passed_findings = 0
    failed_findings = 0

    critical_requirements_list = []

    # Process each requirement
    for requirement in requirements_list:
        requirement_id = requirement["id"]
        requirement_status = requirement["attributes"]["status"]
        requirement_attributes = attributes_by_requirement_id.get(requirement_id, {})

        # Count requirements by status
        if requirement_status == StatusChoices.PASS:
            passed_requirements += 1
        elif requirement_status == StatusChoices.FAIL:
            failed_requirements += 1
        elif requirement_status == StatusChoices.MANUAL:
            manual_requirements += 1

        # Get findings data
        req_passed_findings = requirement["attributes"].get("passed_findings", 0)
        req_total_findings = requirement["attributes"].get("total_findings", 0)

        # Accumulate findings counts
        total_findings += req_total_findings
        passed_findings += req_passed_findings
        failed_findings += req_total_findings - req_passed_findings

        # Skip requirements with no findings
        if req_total_findings == 0:
            continue

        overall_has_findings = True

        # Get requirement metadata
        metadata = requirement_attributes.get("attributes", {}).get(
            "req_attributes", []
        )
        if not metadata or len(metadata) == 0:
            continue

        m = metadata[0]
        risk_level = getattr(m, "LevelOfRisk", 0)
        weight = getattr(m, "Weight", 0)
        section = getattr(m, "Section", "Unknown")

        # Calculate ThreatScore components using formula from UI
        rate_i = req_passed_findings / req_total_findings
        rfac_i = 1 + 0.25 * risk_level

        # Update overall score
        overall_numerator += rate_i * req_total_findings * weight * rfac_i
        overall_denominator += req_total_findings * weight * rfac_i
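        # Illustrative arithmetic (comment added for clarity, not in the
        # original diff): a requirement with 3/4 findings passed, weight=2 and
        # risk_level=4 has rate_i=0.75 and rfac_i=1 + 0.25*4 = 2.0, so it adds
        # 0.75 * 4 * 2 * 2.0 = 12.0 to the numerator and 4 * 2 * 2.0 = 16.0 to
        # the denominator; higher risk and weight amplify its influence.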

        # Update section scores
        if section not in sections_data:
            sections_data[section] = {
                "numerator": 0,
                "denominator": 0,
                "has_findings": False,
            }

        sections_data[section]["has_findings"] = True
        sections_data[section]["numerator"] += (
            rate_i * req_total_findings * weight * rfac_i
        )
        sections_data[section]["denominator"] += req_total_findings * weight * rfac_i

        # Identify critical failed requirements
        if requirement_status == StatusChoices.FAIL and risk_level >= min_risk_level:
            critical_requirements_list.append(
                {
                    "requirement_id": requirement_id,
                    "title": getattr(m, "Title", "N/A"),
                    "section": section,
                    "subsection": getattr(m, "SubSection", "N/A"),
                    "risk_level": risk_level,
                    "weight": weight,
                    "passed_findings": req_passed_findings,
                    "total_findings": req_total_findings,
                    "description": getattr(m, "AttributeDescription", "N/A"),
                }
            )

    # Calculate overall ThreatScore
    if not overall_has_findings:
        overall_score = 100.0
    elif overall_denominator > 0:
        overall_score = (overall_numerator / overall_denominator) * 100
    else:
        overall_score = 0.0
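    # Reading note (comment added for clarity, not in the original diff): a
    # scan that produced no automated findings scores 100.0 by convention,
    # since nothing measurable failed; the 0.0 branch appears to be a
    # defensive fallback for findings whose weight contributions are all zero.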

    # Calculate section scores
    section_scores = {}
    for section, data in sections_data.items():
        if data["has_findings"] and data["denominator"] > 0:
            section_scores[section] = (data["numerator"] / data["denominator"]) * 100
        else:
            section_scores[section] = 100.0

    # Sort critical requirements by risk level (desc) and weight (desc)
    critical_requirements_list.sort(
        key=lambda x: (x["risk_level"], x["weight"]), reverse=True
    )

    logger.info(
        f"ThreatScore computed: {overall_score:.2f}% "
        f"({passed_requirements}/{total_requirements} requirements passed, "
        f"{len(critical_requirements_list)} critical failures)"
    )

    return {
        "overall_score": round(overall_score, 2),
        "section_scores": {k: round(v, 2) for k, v in section_scores.items()},
        "critical_requirements": critical_requirements_list,
        "total_requirements": total_requirements,
        "passed_requirements": passed_requirements,
        "failed_requirements": failed_requirements,
        "manual_requirements": manual_requirements,
        "total_findings": total_findings,
        "passed_findings": passed_findings,
        "failed_findings": failed_findings,
    }
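# Hedged consumption sketch (illustration only, not part of the diff): the
# returned dict can drive a simple textual summary; every key used below is
# documented in the Returns section above.
#
#     metrics = compute_threatscore_metrics(
#         tenant_id, scan_id, provider_id, "prowler_threatscore_aws"
#     )
#     for section, score in sorted(metrics["section_scores"].items()):
#         print(f"{section}: {score:.2f}%")
#     for req in metrics["critical_requirements"][:5]:
#         print(f"[risk {req['risk_level']}] {req['title']}")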
@@ -0,0 +1,239 @@
from collections import defaultdict

from celery.utils.log import get_task_logger
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE
from django.db.models import Count, Q
from tasks.utils import batched

from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import Finding, StatusChoices
from prowler.lib.outputs.finding import Finding as FindingOutput

logger = get_task_logger(__name__)


def _aggregate_requirement_statistics_from_database(
    tenant_id: str, scan_id: str
) -> dict[str, dict[str, int]]:
    """
    Aggregate finding statistics by check_id using database aggregation.

    This function uses Django ORM aggregation to calculate pass/fail statistics
    entirely in the database, avoiding the need to load findings into memory.

    Args:
        tenant_id (str): The tenant ID for Row-Level Security context.
        scan_id (str): The ID of the scan to retrieve findings for.

    Returns:
        dict[str, dict[str, int]]: Dictionary mapping check_id to statistics:
            - 'passed' (int): Number of passed findings for this check
            - 'total' (int): Total number of findings for this check

    Example:
        {
            'aws_iam_user_mfa_enabled': {'passed': 10, 'total': 15},
            'aws_s3_bucket_public_access': {'passed': 0, 'total': 5}
        }
    """
    requirement_statistics_by_check_id = {}

    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        aggregated_statistics_queryset = (
            Finding.all_objects.filter(
                tenant_id=tenant_id, scan_id=scan_id, muted=False
            )
            .values("check_id")
            .annotate(
                total_findings=Count(
                    "id",
                    filter=Q(status__in=[StatusChoices.PASS, StatusChoices.FAIL]),
                ),
                passed_findings=Count("id", filter=Q(status=StatusChoices.PASS)),
            )
        )

        for aggregated_stat in aggregated_statistics_queryset:
            check_id = aggregated_stat["check_id"]
            requirement_statistics_by_check_id[check_id] = {
                "passed": aggregated_stat["passed_findings"],
                "total": aggregated_stat["total_findings"],
            }

    logger.info(
        f"Aggregated statistics for {len(requirement_statistics_by_check_id)} unique checks"
    )
    return requirement_statistics_by_check_id
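# Rough SQL equivalent of the aggregation above (added for illustration; the
# exact statement Django emits may differ in table naming, quoting, and
# parameter binding):
#
#     SELECT check_id,
#            COUNT(id) FILTER (WHERE status IN ('PASS', 'FAIL')) AS total_findings,
#            COUNT(id) FILTER (WHERE status = 'PASS')            AS passed_findings
#       FROM findings
#      WHERE tenant_id = %s AND scan_id = %s AND muted = false
#      GROUP BY check_id;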


def _calculate_requirements_data_from_statistics(
    compliance_obj, requirement_statistics_by_check_id: dict[str, dict[str, int]]
) -> tuple[dict[str, dict], list[dict]]:
    """
    Calculate requirement status and statistics using pre-aggregated database statistics.

    Args:
        compliance_obj: The compliance framework object containing requirements.
        requirement_statistics_by_check_id (dict[str, dict[str, int]]): Pre-aggregated statistics
            mapping check_id to {'passed': int, 'total': int} counts.

    Returns:
        tuple[dict[str, dict], list[dict]]: A tuple containing:
            - attributes_by_requirement_id: Dictionary mapping requirement IDs to their attributes.
            - requirements_list: List of requirement dictionaries with status and statistics.
    """
    attributes_by_requirement_id = {}
    requirements_list = []

    compliance_framework = getattr(compliance_obj, "Framework", "N/A")
    compliance_version = getattr(compliance_obj, "Version", "N/A")

    for requirement in compliance_obj.Requirements:
        requirement_id = requirement.Id
        requirement_description = getattr(requirement, "Description", "")
        requirement_checks = getattr(requirement, "Checks", [])
        requirement_attributes = getattr(requirement, "Attributes", [])

        attributes_by_requirement_id[requirement_id] = {
            "attributes": {
                "req_attributes": requirement_attributes,
                "checks": requirement_checks,
            },
            "description": requirement_description,
        }

        total_passed_findings = 0
        total_findings_count = 0

        for check_id in requirement_checks:
            if check_id in requirement_statistics_by_check_id:
                check_statistics = requirement_statistics_by_check_id[check_id]
                total_findings_count += check_statistics["total"]
                total_passed_findings += check_statistics["passed"]

        if total_findings_count > 0:
            if total_passed_findings == total_findings_count:
                requirement_status = StatusChoices.PASS
            else:
                requirement_status = StatusChoices.FAIL
        else:
            requirement_status = StatusChoices.MANUAL

        requirements_list.append(
            {
                "id": requirement_id,
                "attributes": {
                    "framework": compliance_framework,
                    "version": compliance_version,
                    "status": requirement_status,
                    "description": requirement_description,
                    "passed_findings": total_passed_findings,
                    "total_findings": total_findings_count,
                },
            }
        )

    return attributes_by_requirement_id, requirements_list
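# Worked example (comment added for clarity, not in the original diff): with
# statistics {'check_a': {'passed': 2, 'total': 2}, 'check_b': {'passed': 0, 'total': 1}},
# a requirement whose Checks are ['check_a', 'check_b'] aggregates to
# passed_findings=2 and total_findings=3, so its status is FAIL; a requirement
# whose checks produced no findings at all is reported as MANUAL.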


def _load_findings_for_requirement_checks(
    tenant_id: str,
    scan_id: str,
    check_ids: list[str],
    prowler_provider,
    findings_cache: dict[str, list[FindingOutput]] | None = None,
) -> dict[str, list[FindingOutput]]:
    """
    Load findings for specific check IDs on-demand with optional caching.

    This function loads only the findings needed for a specific set of checks,
    minimizing memory usage by avoiding loading all findings at once. This is used
    when generating detailed findings tables for specific requirements in the PDF.

    Supports optional caching to avoid duplicate queries when generating multiple
    reports for the same scan.

    Args:
        tenant_id (str): The tenant ID for Row-Level Security context.
        scan_id (str): The ID of the scan to retrieve findings for.
        check_ids (list[str]): List of check IDs to load findings for.
        prowler_provider: The initialized Prowler provider instance.
        findings_cache (dict, optional): Cache of already loaded findings.
            If provided, checks are first looked up in cache before querying database.

    Returns:
        dict[str, list[FindingOutput]]: Dictionary mapping check_id to list of FindingOutput objects.

    Example:
        {
            'aws_iam_user_mfa_enabled': [FindingOutput(...), FindingOutput(...)],
            'aws_s3_bucket_public_access': [FindingOutput(...)]
        }
    """
    findings_by_check_id = defaultdict(list)

    if not check_ids:
        return dict(findings_by_check_id)

    # Initialize cache if not provided
    if findings_cache is None:
        findings_cache = {}

    # Separate cached and non-cached check_ids
    check_ids_to_load = []
    cache_hits = 0
    cache_misses = 0

    for check_id in check_ids:
        if check_id in findings_cache:
            # Reuse from cache
            findings_by_check_id[check_id] = findings_cache[check_id]
            cache_hits += 1
        else:
            # Need to load from database
            check_ids_to_load.append(check_id)
            cache_misses += 1

    if cache_hits > 0:
        logger.info(
            f"Findings cache: {cache_hits} hits, {cache_misses} misses "
            f"({cache_hits / (cache_hits + cache_misses) * 100:.1f}% hit rate)"
        )

    # If all check_ids were in cache, return early
    if not check_ids_to_load:
        return dict(findings_by_check_id)

    logger.info(f"Loading findings for {len(check_ids_to_load)} checks on-demand")

    findings_queryset = (
        Finding.all_objects.filter(
            tenant_id=tenant_id, scan_id=scan_id, check_id__in=check_ids_to_load
        )
        .order_by("uid")
        .iterator()
    )

    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        for batch, is_last_batch in batched(
            findings_queryset, DJANGO_FINDINGS_BATCH_SIZE
        ):
            for finding_model in batch:
                finding_output = FindingOutput.transform_api_finding(
                    finding_model, prowler_provider
                )
                findings_by_check_id[finding_output.check_id].append(finding_output)
                # Update cache with newly loaded findings
                if finding_output.check_id not in findings_cache:
                    findings_cache[finding_output.check_id] = []
                findings_cache[finding_output.check_id].append(finding_output)

    total_findings_loaded = sum(
        len(findings) for findings in findings_by_check_id.values()
    )
    logger.info(
        f"Loaded {total_findings_loaded} findings for {len(findings_by_check_id)} checks"
    )

    return dict(findings_by_check_id)
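# Hedged usage sketch (illustration only): sharing one cache dict across two
# report generations for the same scan so overlapping checks hit the database
# only once. threatscore_check_ids and ens_check_ids are hypothetical lists of
# check IDs gathered from each framework's requirements.
#
#     shared_cache: dict = {}
#     ts_findings = _load_findings_for_requirement_checks(
#         tenant_id, scan_id, threatscore_check_ids, provider, shared_cache
#     )
#     ens_findings = _load_findings_for_requirement_checks(
#         tenant_id, scan_id, ens_check_ids, provider, shared_cache
#     )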
@@ -1,14 +1,30 @@
import os

from datetime import datetime, timedelta, timezone
from pathlib import Path
from shutil import rmtree

from celery import chain, group, shared_task
from celery.utils.log import get_task_logger
from django_celery_beat.models import PeriodicTask

from api.compliance import get_compliance_frameworks
from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.decorators import set_tenant
from api.models import Finding, Integration, Provider, Scan, ScanSummary, StateChoices
from api.utils import initialize_prowler_provider
from api.v1.serializers import ScanTaskSerializer
from config.celery import RLSTask
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE, DJANGO_TMP_OUTPUT_DIRECTORY
from django_celery_beat.models import PeriodicTask
from tasks.jobs.backfill import backfill_resource_scan_summaries
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.generic.generic import GenericCompliance
from prowler.lib.outputs.finding import Finding as FindingOutput
from tasks.jobs.attack_paths import attack_paths_scan
from tasks.jobs.backfill import (
    backfill_compliance_summaries,
    backfill_resource_scan_summaries,
)
from tasks.jobs.connection import (
    check_integration_connection,
    check_lighthouse_connection,
@@ -32,7 +48,7 @@ from tasks.jobs.lighthouse_providers import (
    refresh_lighthouse_provider_models,
)
from tasks.jobs.muting import mute_historical_findings
from tasks.jobs.report import generate_threatscore_report_job
from tasks.jobs.report import generate_compliance_reports_job
from tasks.jobs.scan import (
    aggregate_findings,
    create_compliance_requirements,
@@ -40,17 +56,6 @@ from tasks.jobs.scan import (
)
from tasks.utils import batched, get_next_execution_datetime

from api.compliance import get_compliance_frameworks
from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.decorators import set_tenant
from api.models import Finding, Integration, Provider, Scan, ScanSummary, StateChoices
from api.utils import initialize_prowler_provider
from api.v1.serializers import ScanTaskSerializer
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.generic.generic import GenericCompliance
from prowler.lib.outputs.finding import Finding as FindingOutput

logger = get_task_logger(__name__)


@@ -72,7 +77,8 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
            scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
        ),
        group(
            generate_threatscore_report_task.si(
            # Use optimized task that generates both reports with shared queries
            generate_compliance_reports_task.si(
                tenant_id=tenant_id, scan_id=scan_id, provider_id=provider_id
            ),
            check_integrations_task.si(
@@ -82,6 +88,9 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
            ),
        ),
    ).apply_async()
    perform_attack_paths_scan_task.apply_async(
        kwargs={"tenant_id": tenant_id, "scan_id": scan_id}
    )
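    # Note on Celery signatures (comment added for clarity): .si() builds an
    # *immutable* signature, so the chained/grouped tasks above receive only
    # the keyword arguments given here and ignore the preceding task's result.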


@shared_task(base=RLSTask, name="provider-connection-check")
@@ -277,6 +286,25 @@ def perform_scan_summary_task(tenant_id: str, scan_id: str):
    return aggregate_findings(tenant_id=tenant_id, scan_id=scan_id)


# TODO: This task must be queued at the `attack-paths` queue, don't forget to add it to the `docker-entrypoint.sh` file
@shared_task(base=RLSTask, bind=True, name="attack-paths-scan-perform", queue="scans")
def perform_attack_paths_scan_task(self, tenant_id: str, scan_id: str):
    """
    Execute an Attack Paths scan for the given provider within the current tenant RLS context.

    Args:
        self: The task instance (automatically passed when bind=True).
        tenant_id (str): The tenant identifier for RLS context.
        scan_id (str): The Prowler scan identifier for obtaining the tenant and provider context.

    Returns:
        Any: The result from `attack_paths_scan`, including any per-scan failure details.
    """
    return attack_paths_scan(
        tenant_id=tenant_id, scan_id=scan_id, task_id=self.request.id
    )


@shared_task(name="tenant-deletion", queue="deletion", autoretry_for=(Exception,))
def delete_tenant_task(tenant_id: str):
    return delete_tenant(pk=tenant_id)
@@ -316,7 +344,7 @@ def generate_outputs_task(scan_id: str, provider_id: str, tenant_id: str):

    frameworks_bulk = Compliance.get_bulk(provider_type)
    frameworks_avail = get_compliance_frameworks(provider_type)
    out_dir, comp_dir, _ = _generate_output_directory(
    out_dir, comp_dir = _generate_output_directory(
        DJANGO_TMP_OUTPUT_DIRECTORY, provider_uid, tenant_id, scan_id
    )

@@ -494,6 +522,21 @@ def backfill_scan_resource_summaries_task(tenant_id: str, scan_id: str):
    return backfill_resource_scan_summaries(tenant_id=tenant_id, scan_id=scan_id)


@shared_task(name="backfill-compliance-summaries", queue="backfill")
def backfill_compliance_summaries_task(tenant_id: str, scan_id: str):
    """
    Backfill compliance overview summaries for a completed scan.

    This task aggregates compliance requirement data across regions
    to create pre-computed summary records for fast compliance overview queries.

    Args:
        tenant_id (str): The tenant identifier.
        scan_id (str): The scan identifier.
    """
    return backfill_compliance_summaries(tenant_id=tenant_id, scan_id=scan_id)
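# Hedged usage sketch (illustration only): the backfill task is enqueued like
# any Celery task, e.g. from a maintenance script or shell:
#
#     backfill_compliance_summaries_task.delay(
#         tenant_id="tenant-123", scan_id="scan-456"
#     )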


@shared_task(base=RLSTask, name="scan-compliance-overviews", queue="compliance")
def create_compliance_requirements_task(tenant_id: str, scan_id: str):
    """
@@ -668,19 +711,33 @@ def jira_integration_task(

@shared_task(
    base=RLSTask,
    name="scan-threatscore-report",
    name="scan-compliance-reports",
    queue="scan-reports",
)
def generate_threatscore_report_task(tenant_id: str, scan_id: str, provider_id: str):
def generate_compliance_reports_task(tenant_id: str, scan_id: str, provider_id: str):
    """
    Task to generate a threatscore report for a given scan.
    Optimized task to generate ThreatScore, ENS, and NIS2 reports with shared queries.

    This task is more efficient than running separate report tasks because it reuses database queries:
    - Provider object fetched once (instead of three times)
    - Requirement statistics aggregated once (instead of three times)
    - Can reduce database load by roughly 50-70%

    Args:
        tenant_id (str): The tenant identifier.
        scan_id (str): The scan identifier.
        provider_id (str): The provider identifier.

    Returns:
        dict: Results for all reports containing upload status and paths.
    """
    return generate_threatscore_report_job(
        tenant_id=tenant_id, scan_id=scan_id, provider_id=provider_id
    return generate_compliance_reports_job(
        tenant_id=tenant_id,
        scan_id=scan_id,
        provider_id=provider_id,
        generate_threatscore=True,
        generate_ens=True,
        generate_nis2=True,
    )
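# Note (added comment): the generate_threatscore/generate_ens/generate_nis2
# flags are passed explicitly above, so a single pass emits all three reports
# from the shared aggregated statistics; presumably a caller could toggle
# individual flags to regenerate only one report type.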

@@ -0,0 +1,416 @@
from contextlib import nullcontext
from types import SimpleNamespace
from unittest.mock import MagicMock, call, patch

import pytest

from api.models import (
    AttackPathsScan,
    Finding,
    Provider,
    Resource,
    ResourceFindingMapping,
    Scan,
    StateChoices,
    StatusChoices,
)
from prowler.lib.check.models import Severity
from tasks.jobs.attack_paths import prowler as prowler_module
from tasks.jobs.attack_paths.scan import run as attack_paths_run


@pytest.mark.django_db
class TestAttackPathsRun:
    def test_run_success_flow(self, tenants_fixture, providers_fixture, scans_fixture):
        tenant = tenants_fixture[0]
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
        provider.save()
        scan = scans_fixture[0]
        scan.provider = provider
        scan.save()

        attack_paths_scan = AttackPathsScan.objects.create(
            tenant_id=tenant.id,
            provider=provider,
            scan=scan,
            state=StateChoices.SCHEDULED,
        )

        mock_session = MagicMock()
        session_ctx = MagicMock()
        session_ctx.__enter__.return_value = mock_session
        session_ctx.__exit__.return_value = False
        ingestion_result = {"organizations": "warning"}
        ingestion_fn = MagicMock(return_value=ingestion_result)

        with (
            patch(
                "tasks.jobs.attack_paths.scan.rls_transaction",
                new=lambda *args, **kwargs: nullcontext(),
            ),
            patch(
                "tasks.jobs.attack_paths.scan.initialize_prowler_provider",
                return_value=MagicMock(_enabled_regions=["us-east-1"]),
            ),
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.get_uri",
                return_value="bolt://neo4j",
            ),
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.get_database_name",
                return_value="db-scan-id",
            ) as mock_get_db_name,
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.create_database"
            ) as mock_create_db,
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.get_session",
                return_value=session_ctx,
            ) as mock_get_session,
            patch(
                "tasks.jobs.attack_paths.scan.cartography_create_indexes.run"
            ) as mock_cartography_indexes,
            patch(
                "tasks.jobs.attack_paths.scan.cartography_analysis.run"
            ) as mock_cartography_analysis,
            patch(
                "tasks.jobs.attack_paths.scan.cartography_ontology.run"
            ) as mock_cartography_ontology,
            patch(
                "tasks.jobs.attack_paths.scan.prowler.create_indexes"
            ) as mock_prowler_indexes,
            patch(
                "tasks.jobs.attack_paths.scan.prowler.analysis"
            ) as mock_prowler_analysis,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.retrieve_attack_paths_scan",
                return_value=attack_paths_scan,
            ) as mock_retrieve_scan,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan"
            ) as mock_starting,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress"
            ) as mock_update_progress,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.finish_attack_paths_scan"
            ) as mock_finish,
            patch(
                "tasks.jobs.attack_paths.scan.get_cartography_ingestion_function",
                return_value=ingestion_fn,
            ) as mock_get_ingestion,
            patch(
                "tasks.jobs.attack_paths.scan._call_within_event_loop",
                side_effect=lambda fn, *a, **kw: fn(*a, **kw),
            ) as mock_event_loop,
        ):
            result = attack_paths_run(str(tenant.id), str(scan.id), "task-123")

        assert result == ingestion_result
        mock_retrieve_scan.assert_called_once_with(str(tenant.id), str(scan.id))
        mock_starting.assert_called_once()
        config = mock_starting.call_args[0][2]
        assert config.neo4j_database == "db-scan-id"

        mock_create_db.assert_called_once_with("db-scan-id")
        mock_get_session.assert_called_once_with("db-scan-id")
        mock_cartography_indexes.assert_called_once_with(mock_session, config)
        mock_prowler_indexes.assert_called_once_with(mock_session)
        mock_cartography_analysis.assert_called_once_with(mock_session, config)
        mock_cartography_ontology.assert_called_once_with(mock_session, config)
        mock_prowler_analysis.assert_called_once_with(
            mock_session,
            provider,
            str(scan.id),
            config,
        )
        assert mock_get_ingestion.call_args_list == [
            call(provider.provider),
            call(provider.provider),
        ]
        mock_event_loop.assert_called_once()
        mock_update_progress.assert_any_call(attack_paths_scan, 1)
        mock_update_progress.assert_any_call(attack_paths_scan, 2)
        mock_update_progress.assert_any_call(attack_paths_scan, 95)
        mock_finish.assert_called_once_with(
            attack_paths_scan, StateChoices.COMPLETED, ingestion_result
        )
        mock_get_db_name.assert_called_once_with(attack_paths_scan.id)

    def test_run_failure_marks_scan_failed(
        self, tenants_fixture, providers_fixture, scans_fixture
    ):
        tenant = tenants_fixture[0]
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
        provider.save()
        scan = scans_fixture[0]
        scan.provider = provider
        scan.save()

        attack_paths_scan = AttackPathsScan.objects.create(
            tenant_id=tenant.id,
            provider=provider,
            scan=scan,
            state=StateChoices.SCHEDULED,
        )

        mock_session = MagicMock()
        session_ctx = MagicMock()
        session_ctx.__enter__.return_value = mock_session
        session_ctx.__exit__.return_value = False
        ingestion_fn = MagicMock(side_effect=RuntimeError("ingestion boom"))

        with (
            patch(
                "tasks.jobs.attack_paths.scan.rls_transaction",
                new=lambda *args, **kwargs: nullcontext(),
            ),
            patch(
                "tasks.jobs.attack_paths.scan.initialize_prowler_provider",
                return_value=MagicMock(_enabled_regions=["us-east-1"]),
            ),
            patch("tasks.jobs.attack_paths.scan.graph_database.get_uri"),
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.get_database_name",
                return_value="db-scan-id",
            ),
            patch("tasks.jobs.attack_paths.scan.graph_database.create_database"),
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.get_session",
                return_value=session_ctx,
            ),
            patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run"),
            patch("tasks.jobs.attack_paths.scan.cartography_analysis.run"),
            patch("tasks.jobs.attack_paths.scan.prowler.create_indexes"),
            patch("tasks.jobs.attack_paths.scan.prowler.analysis"),
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.retrieve_attack_paths_scan",
                return_value=attack_paths_scan,
            ),
            patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan"),
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress"
            ),
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.finish_attack_paths_scan"
            ) as mock_finish,
            patch(
                "tasks.jobs.attack_paths.scan.get_cartography_ingestion_function",
                return_value=ingestion_fn,
            ),
            patch(
                "tasks.jobs.attack_paths.scan._call_within_event_loop",
                side_effect=lambda fn, *a, **kw: fn(*a, **kw),
            ),
            patch(
                "tasks.jobs.attack_paths.scan.utils.stringify_exception",
                return_value="Cartography failed: ingestion boom",
            ),
        ):
            with pytest.raises(RuntimeError, match="ingestion boom"):
                attack_paths_run(str(tenant.id), str(scan.id), "task-456")

        failure_args = mock_finish.call_args[0]
        assert failure_args[0] is attack_paths_scan
        assert failure_args[1] == StateChoices.FAILED
        assert failure_args[2] == {
            "global_cartography_error": "Cartography failed: ingestion boom"
        }

    def test_run_returns_early_for_unsupported_provider(self, tenants_fixture):
        tenant = tenants_fixture[0]
        provider = Provider.objects.create(
            provider=Provider.ProviderChoices.GCP,
            uid="gcp-account",
            alias="gcp",
            tenant_id=tenant.id,
        )
        scan = Scan.objects.create(
            name="GCP Scan",
            provider=provider,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.AVAILABLE,
            tenant_id=tenant.id,
        )

        with (
            patch(
                "tasks.jobs.attack_paths.scan.rls_transaction",
                new=lambda *args, **kwargs: nullcontext(),
            ),
            patch(
                "tasks.jobs.attack_paths.scan.initialize_prowler_provider",
                return_value=MagicMock(),
            ),
            patch(
                "tasks.jobs.attack_paths.scan.get_cartography_ingestion_function",
                return_value=None,
            ) as mock_get_ingestion,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.retrieve_attack_paths_scan"
            ) as mock_retrieve,
        ):
            result = attack_paths_run(str(tenant.id), str(scan.id), "task-789")

        assert result == {}
        mock_get_ingestion.assert_called_once_with(provider.provider)
        mock_retrieve.assert_not_called()


@pytest.mark.django_db
class TestAttackPathsProwlerHelpers:
    def test_create_indexes_executes_all_statements(self):
        mock_session = MagicMock()
        with patch("tasks.jobs.attack_paths.prowler.run_write_query") as mock_run_write:
            prowler_module.create_indexes(mock_session)

        assert mock_run_write.call_count == len(prowler_module.INDEX_STATEMENTS)
        mock_run_write.assert_has_calls(
            [call(mock_session, stmt) for stmt in prowler_module.INDEX_STATEMENTS]
        )

    def test_load_findings_batches_requests(self, providers_fixture):
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
        provider.save()

        findings = [
            {"id": "1", "resource_uid": "r-1"},
            {"id": "2", "resource_uid": "r-2"},
        ]
        config = SimpleNamespace(update_tag=12345)
        mock_session = MagicMock()

        with (
            patch.object(prowler_module, "BATCH_SIZE", 1),
            patch(
                "tasks.jobs.attack_paths.prowler.get_root_node_label",
                return_value="AWSAccount",
            ),
            patch(
                "tasks.jobs.attack_paths.prowler.get_node_uid_field",
                return_value="arn",
            ),
        ):
            prowler_module.load_findings(mock_session, findings, provider, config)

        assert mock_session.run.call_count == 2
        for call_args in mock_session.run.call_args_list:
            params = call_args.args[1]
            assert params["provider_uid"] == str(provider.uid)
            assert params["last_updated"] == config.update_tag
            assert "findings_data" in params

    def test_cleanup_findings_runs_batches(self, providers_fixture):
        provider = providers_fixture[0]
        config = SimpleNamespace(update_tag=1024)
        mock_session = MagicMock()

        first_batch = MagicMock()
        first_batch.single.return_value = {"deleted_findings_count": 3}
        second_batch = MagicMock()
        second_batch.single.return_value = {"deleted_findings_count": 0}
        mock_session.run.side_effect = [first_batch, second_batch]

        prowler_module.cleanup_findings(mock_session, provider, config)

        assert mock_session.run.call_count == 2
        params = mock_session.run.call_args.args[1]
        assert params["provider_uid"] == str(provider.uid)
        assert params["last_updated"] == config.update_tag

    def test_get_provider_last_scan_findings_returns_latest_scan_data(
        self,
        tenants_fixture,
        providers_fixture,
    ):
        tenant = tenants_fixture[0]
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
        provider.save()

        resource = Resource.objects.create(
            tenant_id=tenant.id,
            provider=provider,
            uid="resource-uid",
            name="Resource",
            region="us-east-1",
            service="ec2",
            type="instance",
        )

        older_scan = Scan.objects.create(
            name="Older",
            provider=provider,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.COMPLETED,
            tenant_id=tenant.id,
        )
        old_finding = Finding.objects.create(
            tenant_id=tenant.id,
            uid="older-finding",
            scan=older_scan,
            delta=Finding.DeltaChoices.NEW,
            status=StatusChoices.PASS,
            status_extended="ok",
            severity=Severity.low,
            impact=Severity.low,
            impact_extended="",
            raw_result={},
            check_id="check-old",
            check_metadata={"checktitle": "Old"},
            first_seen_at=older_scan.inserted_at,
        )
        ResourceFindingMapping.objects.create(
            tenant_id=tenant.id,
            resource=resource,
            finding=old_finding,
        )

        latest_scan = Scan.objects.create(
            name="Latest",
            provider=provider,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.COMPLETED,
            tenant_id=tenant.id,
        )
        finding = Finding.objects.create(
            tenant_id=tenant.id,
            uid="finding-uid",
            scan=latest_scan,
            delta=Finding.DeltaChoices.NEW,
            status=StatusChoices.FAIL,
            status_extended="failed",
            severity=Severity.high,
            impact=Severity.high,
            impact_extended="",
            raw_result={},
            check_id="check-1",
            check_metadata={"checktitle": "Check title"},
            first_seen_at=latest_scan.inserted_at,
        )
        ResourceFindingMapping.objects.create(
            tenant_id=tenant.id,
            resource=resource,
            finding=finding,
        )

        latest_scan.refresh_from_db()

        with patch(
            "tasks.jobs.attack_paths.prowler.rls_transaction",
            new=lambda *args, **kwargs: nullcontext(),
        ):
            findings_data = prowler_module.get_provider_last_scan_findings(
                provider,
                str(latest_scan.id),
            )

        assert len(findings_data) == 1
        finding_dict = findings_data[0]
        assert finding_dict["id"] == str(finding.id)
        assert finding_dict["resource_uid"] == resource.uid
        assert finding_dict["check_title"] == "Check title"
        assert finding_dict["scan_id"] == str(latest_scan.id)
@@ -1,43 +1,53 @@
from uuid import uuid4

import pytest
from tasks.jobs.backfill import backfill_resource_scan_summaries
from tasks.jobs.backfill import (
    backfill_compliance_summaries,
    backfill_resource_scan_summaries,
)

from api.models import ResourceScanSummary, Scan, StateChoices
from api.models import (
    ComplianceOverviewSummary,
    ResourceScanSummary,
    Scan,
    StateChoices,
)


@pytest.fixture(scope="function")
def resource_scan_summary_data(scans_fixture):
    scan = scans_fixture[0]
    return ResourceScanSummary.objects.create(
        tenant_id=scan.tenant_id,
        scan_id=scan.id,
        resource_id=str(uuid4()),
        service="aws",
        region="us-east-1",
        resource_type="instance",
    )


@pytest.fixture(scope="function")
def get_not_completed_scans(providers_fixture):
    provider_id = providers_fixture[0].id
    tenant_id = providers_fixture[0].tenant_id
    scan_1 = Scan.objects.create(
        tenant_id=tenant_id,
        trigger=Scan.TriggerChoices.MANUAL,
        state=StateChoices.EXECUTING,
        provider_id=provider_id,
    )
    scan_2 = Scan.objects.create(
        tenant_id=tenant_id,
        trigger=Scan.TriggerChoices.MANUAL,
        state=StateChoices.AVAILABLE,
        provider_id=provider_id,
    )
    return scan_1, scan_2


@pytest.mark.django_db
class TestBackfillResourceScanSummaries:
    @pytest.fixture(scope="function")
    def resource_scan_summary_data(self, scans_fixture):
        scan = scans_fixture[0]
        return ResourceScanSummary.objects.create(
            tenant_id=scan.tenant_id,
            scan_id=scan.id,
            resource_id=str(uuid4()),
            service="aws",
            region="us-east-1",
            resource_type="instance",
        )

    @pytest.fixture(scope="function")
    def get_not_completed_scans(self, providers_fixture):
        provider_id = providers_fixture[0].id
        tenant_id = providers_fixture[0].tenant_id
        scan_1 = Scan.objects.create(
            tenant_id=tenant_id,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.EXECUTING,
            provider_id=provider_id,
        )
        scan_2 = Scan.objects.create(
            tenant_id=tenant_id,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.AVAILABLE,
            provider_id=provider_id,
        )
        return scan_1, scan_2

    def test_already_backfilled(self, resource_scan_summary_data):
        tenant_id = resource_scan_summary_data.tenant_id
        scan_id = resource_scan_summary_data.scan_id
@@ -77,3 +87,88 @@ class TestBackfillResourceScanSummaries:
        assert summary.service == resource.service
        assert summary.region == resource.region
        assert summary.resource_type == resource.type

    def test_no_resources_to_backfill(self, scans_fixture):
        scan = scans_fixture[1]  # Failed scan with no findings/resources
        tenant_id = str(scan.tenant_id)
        scan_id = str(scan.id)

        result = backfill_resource_scan_summaries(tenant_id, scan_id)

        assert result == {"status": "no resources to backfill"}


@pytest.mark.django_db
class TestBackfillComplianceSummaries:
    def test_already_backfilled(self, scans_fixture):
        scan = scans_fixture[0]
        tenant_id = str(scan.tenant_id)
        ComplianceOverviewSummary.objects.create(
            tenant_id=scan.tenant_id,
            scan=scan,
            compliance_id="aws_account_security_onboarding_aws",
            requirements_passed=1,
            requirements_failed=0,
            requirements_manual=0,
            total_requirements=1,
        )

        result = backfill_compliance_summaries(tenant_id, str(scan.id))

        assert result == {"status": "already backfilled"}

    def test_not_completed_scan(self, get_not_completed_scans):
        for scan in get_not_completed_scans:
            result = backfill_compliance_summaries(str(scan.tenant_id), str(scan.id))
            assert result == {"status": "scan is not completed"}

    def test_no_compliance_data(self, scans_fixture):
        scan = scans_fixture[1]  # Failed scan with no compliance rows

        result = backfill_compliance_summaries(str(scan.tenant_id), str(scan.id))

        assert result == {"status": "no compliance data to backfill"}

    def test_backfill_creates_compliance_summaries(
        self, tenants_fixture, scans_fixture, compliance_requirements_overviews_fixture
    ):
        tenant = tenants_fixture[0]
        scan = scans_fixture[0]

        result = backfill_compliance_summaries(str(tenant.id), str(scan.id))

        expected = {
            "aws_account_security_onboarding_aws": {
                "requirements_passed": 1,
                "requirements_failed": 1,
                "requirements_manual": 1,
                "total_requirements": 3,
            },
            "cis_1.4_aws": {
                "requirements_passed": 0,
                "requirements_failed": 1,
                "requirements_manual": 0,
                "total_requirements": 1,
            },
            "mitre_attack_aws": {
                "requirements_passed": 0,
                "requirements_failed": 1,
                "requirements_manual": 0,
                "total_requirements": 1,
            },
        }

        assert result == {"status": "backfilled", "inserted": len(expected)}

        summaries = ComplianceOverviewSummary.objects.filter(
            tenant_id=str(tenant.id), scan_id=str(scan.id)
        )
        assert summaries.count() == len(expected)

        for summary in summaries:
            assert summary.compliance_id in expected
            expected_counts = expected[summary.compliance_id]
            assert summary.requirements_passed == expected_counts["requirements_passed"]
            assert summary.requirements_failed == expected_counts["requirements_failed"]
            assert summary.requirements_manual == expected_counts["requirements_manual"]
            assert summary.total_requirements == expected_counts["total_requirements"]
@@ -1,27 +1,60 @@
|
||||
from unittest.mock import call, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from tasks.jobs.deletion import delete_provider, delete_tenant
|
||||
|
||||
from api.models import Provider, Tenant
|
||||
from tasks.jobs.deletion import delete_provider, delete_tenant
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestDeleteProvider:
|
||||
def test_delete_provider_success(self, providers_fixture):
|
||||
instance = providers_fixture[0]
|
||||
tenant_id = str(instance.tenant_id)
|
||||
result = delete_provider(tenant_id, instance.id)
|
||||
with patch(
|
||||
"tasks.jobs.deletion.get_provider_graph_database_names"
|
||||
) as mock_get_provider_graph_database_names, patch(
|
||||
"tasks.jobs.deletion.graph_database.drop_database"
|
||||
) as mock_drop_database:
|
||||
graph_db_names = ["graph-db-1", "graph-db-2"]
|
||||
mock_get_provider_graph_database_names.return_value = graph_db_names
|
||||
|
||||
assert result
|
||||
with pytest.raises(ObjectDoesNotExist):
|
||||
Provider.objects.get(pk=instance.id)
|
||||
instance = providers_fixture[0]
|
||||
tenant_id = str(instance.tenant_id)
|
||||
result = delete_provider(tenant_id, instance.id)
|
||||
|
||||
assert result
|
||||
with pytest.raises(ObjectDoesNotExist):
|
||||
Provider.objects.get(pk=instance.id)
|
||||
|
||||
mock_get_provider_graph_database_names.assert_called_once_with(
|
||||
tenant_id, instance.id
|
||||
)
|
||||
mock_drop_database.assert_has_calls(
|
||||
[call(graph_db_name) for graph_db_name in graph_db_names]
|
||||
)
|
||||
|
||||
def test_delete_provider_does_not_exist(self, tenants_fixture):
|
||||
tenant_id = str(tenants_fixture[0].id)
|
||||
non_existent_pk = "babf6796-cfcc-4fd3-9dcf-88d012247645"
|
||||
with patch(
|
||||
"tasks.jobs.deletion.get_provider_graph_database_names"
|
||||
) as mock_get_provider_graph_database_names, patch(
|
||||
"tasks.jobs.deletion.graph_database.drop_database"
|
||||
) as mock_drop_database:
|
||||
graph_db_names = ["graph-db-1"]
|
||||
mock_get_provider_graph_database_names.return_value = graph_db_names
|
||||
|
||||
with pytest.raises(ObjectDoesNotExist):
|
||||
delete_provider(tenant_id, non_existent_pk)
|
||||
tenant_id = str(tenants_fixture[0].id)
|
||||
non_existent_pk = "babf6796-cfcc-4fd3-9dcf-88d012247645"
|
||||
|
||||
with pytest.raises(ObjectDoesNotExist):
|
||||
delete_provider(tenant_id, non_existent_pk)
|
||||
|
||||
mock_get_provider_graph_database_names.assert_called_once_with(
|
||||
tenant_id, non_existent_pk
|
||||
)
|
||||
mock_drop_database.assert_has_calls(
|
||||
[call(graph_db_name) for graph_db_name in graph_db_names]
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -30,33 +63,68 @@ class TestDeleteTenant:
|
||||
"""
|
||||
Test successful deletion of a tenant and its related data.
|
||||
"""
|
||||
        tenant = tenants_fixture[0]
        providers = Provider.objects.filter(tenant_id=tenant.id)

        with patch(
            "tasks.jobs.deletion.get_provider_graph_database_names"
        ) as mock_get_provider_graph_database_names, patch(
            "tasks.jobs.deletion.graph_database.drop_database"
        ) as mock_drop_database:
            providers = list(Provider.objects.filter(tenant_id=tenant.id))

            # Ensure the tenant and related providers exist before deletion
            assert Tenant.objects.filter(id=tenant.id).exists()
            assert providers

            graph_db_names_per_provider = [
                [f"graph-db-{provider.id}"] for provider in providers
            ]
            mock_get_provider_graph_database_names.side_effect = (
                graph_db_names_per_provider
            )

            # Call the function and validate the result
            deletion_summary = delete_tenant(tenant.id)

            assert deletion_summary is not None
            assert not Tenant.objects.filter(id=tenant.id).exists()
            assert not Provider.objects.filter(tenant_id=tenant.id).exists()

            expected_calls = [
                call(provider.tenant_id, provider.id) for provider in providers
            ]
            mock_get_provider_graph_database_names.assert_has_calls(
                expected_calls, any_order=True
            )
            assert mock_get_provider_graph_database_names.call_count == len(
                expected_calls
            )
            expected_drop_calls = [
                call(graph_db_name[0])
                for graph_db_name in graph_db_names_per_provider
            ]
            mock_drop_database.assert_has_calls(expected_drop_calls, any_order=True)
            assert mock_drop_database.call_count == len(expected_drop_calls)

    def test_delete_tenant_with_no_providers(self, tenants_fixture):
        """
        Test deletion of a tenant with no related providers.
        """
        with patch(
            "tasks.jobs.deletion.get_provider_graph_database_names"
        ) as mock_get_provider_graph_database_names, patch(
            "tasks.jobs.deletion.graph_database.drop_database"
        ) as mock_drop_database:
            tenant = tenants_fixture[1]  # Assume this tenant has no providers
            providers = Provider.objects.filter(tenant_id=tenant.id)

            # Ensure the tenant exists but has no related providers
            assert Tenant.objects.filter(id=tenant.id).exists()
            assert not providers.exists()

            # Call the function and validate the result
            deletion_summary = delete_tenant(tenant.id)

            assert deletion_summary == {}  # No providers, so empty summary
            assert not Tenant.objects.filter(id=tenant.id).exists()

            mock_get_provider_graph_database_names.assert_not_called()
            mock_drop_database.assert_not_called()
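Taken together, these assertions pin down the deletion flow: for every provider in the tenant, resolve its graph database names, drop each one, then delete the tenant row and return a per-provider summary. A rough sketch of what the tests assume `delete_tenant` does (names mirror the patch targets; this is inferred from the assertions, not the actual code in `tasks/jobs/deletion.py`):

```python
# Hypothetical sketch inferred from the tests above, not the real implementation.
def delete_tenant(tenant_id):
    deletion_summary = {}
    for provider in Provider.objects.filter(tenant_id=tenant_id):
        # One provider can own several graph databases; drop them all.
        for db_name in get_provider_graph_database_names(
            provider.tenant_id, provider.id
        ):
            graph_database.drop_database(db_name)
        deletion_summary[str(provider.id)] = provider.delete()
    Tenant.objects.filter(id=tenant_id).delete()
    return deletion_summary
```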
@@ -9,6 +9,7 @@ import pytest
from botocore.exceptions import ClientError
from tasks.jobs.export import (
    _compress_output_files,
    _generate_compliance_output_directory,
    _generate_output_directory,
    _upload_to_s3,
    get_s3_client,
@@ -147,10 +148,11 @@ class TestOutputs:
        )
        mock_logger.assert_called()

    @patch("tasks.jobs.export.set_output_timestamp")
    @patch("tasks.jobs.export.rls_transaction")
    @patch("tasks.jobs.export.Scan")
    def test_generate_output_directory_creates_paths(
        self, mock_scan, mock_rls_transaction, mock_set_timestamp, tmpdir
    ):
        # Mock the scan object with a started_at timestamp
        mock_scan_instance = MagicMock()
@@ -168,22 +170,40 @@ class TestOutputs:
        provider = "aws"
        expected_timestamp = "20230615103045"

        # Test _generate_output_directory (returns standard and compliance paths)
        path, compliance = _generate_output_directory(
            base_dir, provider, tenant_id, scan_id
        )

        assert os.path.isdir(os.path.dirname(path))
        assert os.path.isdir(os.path.dirname(compliance))

        assert path.endswith(f"{provider}-{expected_timestamp}")
        assert compliance.endswith(f"{provider}-{expected_timestamp}")
        assert "/compliance/" in compliance

        # Test _generate_compliance_output_directory with "threatscore"
        threatscore = _generate_compliance_output_directory(
            base_dir, provider, tenant_id, scan_id, compliance_framework="threatscore"
        )

        assert os.path.isdir(os.path.dirname(threatscore))
        assert threatscore.endswith(f"{provider}-{expected_timestamp}")
        assert "/threatscore/" in threatscore

        # Test _generate_compliance_output_directory with "ens"
        ens = _generate_compliance_output_directory(
            base_dir, provider, tenant_id, scan_id, compliance_framework="ens"
        )

        assert os.path.isdir(os.path.dirname(ens))
        assert ens.endswith(f"{provider}-{expected_timestamp}")
        assert "/ens/" in ens

@patch("tasks.jobs.export.set_output_timestamp")
|
||||
@patch("tasks.jobs.export.rls_transaction")
|
||||
@patch("tasks.jobs.export.Scan")
|
||||
def test_generate_output_directory_invalid_character(
|
||||
self, mock_scan, mock_rls_transaction, tmpdir
|
||||
self, mock_scan, mock_rls_transaction, mock_set_timestamp, tmpdir
|
||||
):
|
||||
# Mock the scan object with a started_at timestamp
|
||||
mock_scan_instance = MagicMock()
|
||||
@@ -201,14 +221,25 @@ class TestOutputs:
        provider = "aws/test@check"
        expected_timestamp = "20230615103045"

        # Test provider name sanitization with _generate_output_directory
        path, compliance = _generate_output_directory(
            base_dir, provider, tenant_id, scan_id
        )

        assert os.path.isdir(os.path.dirname(path))
        assert os.path.isdir(os.path.dirname(compliance))

        assert path.endswith(f"aws-test-check-{expected_timestamp}")
        assert compliance.endswith(f"aws-test-check-{expected_timestamp}")

        # Test provider name sanitization with _generate_compliance_output_directory
        threatscore = _generate_compliance_output_directory(
            base_dir, provider, tenant_id, scan_id, compliance_framework="threatscore"
        )
        ens = _generate_compliance_output_directory(
            base_dir, provider, tenant_id, scan_id, compliance_framework="ens"
        )

        assert os.path.isdir(os.path.dirname(threatscore))
        assert os.path.isdir(os.path.dirname(ens))
        assert threatscore.endswith(f"aws-test-check-{expected_timestamp}")
        assert ens.endswith(f"aws-test-check-{expected_timestamp}")

@@ -1,24 +1,28 @@
import uuid

from contextlib import contextmanager
from unittest.mock import MagicMock, patch

import openai
import pytest

from botocore.exceptions import ClientError

from api.models import (
    Integration,
    LighthouseProviderConfiguration,
    LighthouseProviderModels,
)
from tasks.tasks import (
    _perform_scan_complete_tasks,
    check_integrations_task,
    check_lighthouse_provider_connection_task,
    generate_outputs_task,
    perform_attack_paths_scan_task,
    refresh_lighthouse_provider_models_task,
    s3_integration_task,
    security_hub_integration_task,
)


# TODO Move this to outputs/reports jobs
@@ -109,7 +113,6 @@ class TestGenerateOutputs:
                return_value=(
                    "/tmp/test/out-dir",
                    "/tmp/test/comp-dir",
                ),
            ),
            patch("tasks.tasks.Scan.all_objects.filter") as mock_scan_update,
@@ -139,7 +142,7 @@ class TestGenerateOutputs:
            patch("tasks.tasks.Finding.all_objects.filter") as mock_findings,
            patch(
                "tasks.tasks._generate_output_directory",
                return_value=("/tmp/test/out", "/tmp/test/comp"),
            ),
            patch("tasks.tasks.FindingOutput._transform_findings_stats"),
            patch("tasks.tasks.FindingOutput.transform_api_finding"),
@@ -209,7 +212,7 @@ class TestGenerateOutputs:
            patch("tasks.tasks.Finding.all_objects.filter") as mock_findings,
            patch(
                "tasks.tasks._generate_output_directory",
                return_value=("/tmp/test/out", "/tmp/test/comp"),
            ),
            patch(
                "tasks.tasks.FindingOutput._transform_findings_stats",
@@ -289,7 +292,6 @@ class TestGenerateOutputs:
                return_value=(
                    "/tmp/test/outdir",
                    "/tmp/test/compdir",
                ),
            ),
            patch("tasks.tasks._compress_output_files", return_value="outdir.zip"),
@@ -368,7 +370,6 @@ class TestGenerateOutputs:
                return_value=(
                    "/tmp/test/outdir",
                    "/tmp/test/compdir",
                ),
            ),
            patch("tasks.tasks.FindingOutput._transform_findings_stats"),
@@ -436,7 +437,7 @@ class TestGenerateOutputs:
            patch("tasks.tasks.Finding.all_objects.filter") as mock_findings,
            patch(
                "tasks.tasks._generate_output_directory",
                return_value=("/tmp/test/out", "/tmp/test/comp"),
            ),
            patch(
                "tasks.tasks.FindingOutput._transform_findings_stats",
@@ -494,7 +495,7 @@ class TestGenerateOutputs:
            patch("tasks.tasks.Finding.all_objects.filter") as mock_findings,
            patch(
                "tasks.tasks._generate_output_directory",
                return_value=("/tmp/test/out", "/tmp/test/comp"),
            ),
            patch("tasks.tasks.FindingOutput._transform_findings_stats"),
            patch("tasks.tasks.FindingOutput.transform_api_finding"),
@@ -532,43 +533,118 @@ class TestGenerateOutputs:


class TestScanCompleteTasks:
    @patch("tasks.tasks.perform_attack_paths_scan_task.apply_async")
    @patch("tasks.tasks.create_compliance_requirements_task.apply_async")
    @patch("tasks.tasks.perform_scan_summary_task.si")
    @patch("tasks.tasks.generate_outputs_task.si")
    @patch("tasks.tasks.generate_threatscore_report_task.si")
    @patch("tasks.tasks.generate_compliance_reports_task.si")
    @patch("tasks.tasks.check_integrations_task.si")
    def test_scan_complete_tasks(
        self,
        mock_check_integrations_task,
        mock_threatscore_task,
        mock_compliance_reports_task,
        mock_outputs_task,
        mock_scan_summary_task,
        mock_compliance_requirements_task,
        mock_attack_paths_task,
    ):
        """Test that scan complete tasks are properly orchestrated with optimized reports."""
        _perform_scan_complete_tasks("tenant-id", "scan-id", "provider-id")

        # Verify compliance requirements task is called
        mock_compliance_requirements_task.assert_called_once_with(
            kwargs={"tenant_id": "tenant-id", "scan_id": "scan-id"},
        )

        # Verify scan summary task is called
        mock_scan_summary_task.assert_called_once_with(
            scan_id="scan-id",
            tenant_id="tenant-id",
        )

        # Verify outputs task is called
        mock_outputs_task.assert_called_once_with(
            scan_id="scan-id",
            provider_id="provider-id",
            tenant_id="tenant-id",
        )

        # Verify optimized compliance reports task is called (replaces individual tasks)
        mock_compliance_reports_task.assert_called_once_with(
            tenant_id="tenant-id",
            scan_id="scan-id",
            provider_id="provider-id",
        )

        # Verify integrations task is called
        mock_check_integrations_task.assert_called_once_with(
            tenant_id="tenant-id",
            provider_id="provider-id",
            scan_id="scan-id",
        )

        mock_attack_paths_task.assert_called_once_with(
            kwargs={"tenant_id": "tenant-id", "scan_id": "scan-id"}
        )


class TestAttackPathsTasks:
    @staticmethod
    @contextmanager
    def _override_task_request(task, **attrs):
        # Snapshot any pre-existing request attributes so they can be restored.
        request = task.request
        sentinel = object()
        previous = {key: getattr(request, key, sentinel) for key in attrs}
        for key, value in attrs.items():
            setattr(request, key, value)

        try:
            yield
        finally:
            for key, prev in previous.items():
                if prev is sentinel:
                    if hasattr(request, key):
                        delattr(request, key)
                else:
                    setattr(request, key, prev)

    def test_perform_attack_paths_scan_task_calls_runner(self):
        with (
            patch("tasks.tasks.attack_paths_scan") as mock_attack_paths_scan,
            self._override_task_request(
                perform_attack_paths_scan_task, id="celery-task-id"
            ),
        ):
            mock_attack_paths_scan.return_value = {"status": "ok"}

            result = perform_attack_paths_scan_task.run(
                tenant_id="tenant-id", scan_id="scan-id"
            )

            mock_attack_paths_scan.assert_called_once_with(
                tenant_id="tenant-id", scan_id="scan-id", task_id="celery-task-id"
            )
            assert result == {"status": "ok"}

    def test_perform_attack_paths_scan_task_propagates_exception(self):
        with (
            patch(
                "tasks.tasks.attack_paths_scan",
                side_effect=RuntimeError("Exception to propagate"),
            ) as mock_attack_paths_scan,
            self._override_task_request(
                perform_attack_paths_scan_task, id="celery-task-error"
            ),
        ):
            with pytest.raises(RuntimeError, match="Exception to propagate"):
                perform_attack_paths_scan_task.run(
                    tenant_id="tenant-id", scan_id="scan-id"
                )

            mock_attack_paths_scan.assert_called_once_with(
                tenant_id="tenant-id", scan_id="scan-id", task_id="celery-task-error"
            )


@pytest.mark.django_db
class TestCheckIntegrationsTask:
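A note on `_override_task_request`: calling `task.run()` executes the task function synchronously, outside any Celery worker, so `self.request.id` is never populated by the broker; the helper injects the attribute on the shared request object and restores it afterwards. The task body these tests imply looks roughly like this (a sketch, assuming a standard `bind=True` Celery task; the real definition may differ):

```python
from celery import shared_task


@shared_task(bind=True)
def perform_attack_paths_scan_task(self, tenant_id: str, scan_id: str):
    # Forward the Celery-assigned task id so the scan run can be tracked;
    # this is the value the tests inject via _override_task_request.
    return attack_paths_scan(
        tenant_id=tenant_id, scan_id=scan_id, task_id=self.request.id
    )
```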
@@ -738,7 +814,7 @@ class TestCheckIntegrationsTask:
        mock_initialize_provider.return_value = MagicMock()
        mock_compliance_bulk.return_value = {}
        mock_get_frameworks.return_value = []
        mock_generate_dir.return_value = ("out-dir", "comp-dir")
        mock_transform_stats.return_value = {"stats": "data"}

        # Mock findings
@@ -863,7 +939,7 @@ class TestCheckIntegrationsTask:
        mock_initialize_provider.return_value = MagicMock()
        mock_compliance_bulk.return_value = {}
        mock_get_frameworks.return_value = []
        mock_generate_dir.return_value = ("out-dir", "comp-dir")
        mock_transform_stats.return_value = {"stats": "data"}

        # Mock findings
@@ -979,7 +1055,7 @@ class TestCheckIntegrationsTask:
        mock_initialize_provider.return_value = MagicMock()
        mock_compliance_bulk.return_value = {}
        mock_get_frameworks.return_value = []
        mock_generate_dir.return_value = ("out-dir", "comp-dir")
        mock_transform_stats.return_value = {"stats": "data"}

        # Mock findings

@@ -0,0 +1,46 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    # Shorten the long FedRAMP KSI descriptions for better display
    ksi_short_names = {
        "A secure cloud service offering will protect user data, control access, and apply zero trust principles": "Identity and Access Management",
        "A secure cloud service offering will use cloud native architecture and design principles to enforce and enhance the Confidentiality, Integrity and Availability of the system": "Cloud Native Architecture",
        "A secure cloud service provider will ensure that all system changes are properly documented and configuration baselines are updated accordingly": "Change Management",
        "A secure cloud service provider will continuously educate their employees on cybersecurity measures, testing them regularly": "Cybersecurity Education",
        "A secure cloud service offering will document, report, and analyze security incidents to ensure regulatory compliance and continuous security improvement": "Incident Reporting",
        "A secure cloud service offering will monitor, log, and audit all important events, activity, and changes": "Monitoring, Logging, and Auditing",
        "A secure cloud service offering will have intentional, organized, universal guidance for how every information resource, including personnel, is secured": "Policy and Inventory",
        "A secure cloud service offering will define, maintain, and test incident response plan(s) and recovery capabilities to ensure minimal service disruption and data loss": "Recovery Planning",
        "A secure cloud service offering will follow FedRAMP encryption policies, continuously verify information resource integrity, and restrict access to third-party information resources": "Service Configuration",
        "A secure cloud service offering will understand, monitor, and manage supply chain risks from third-party information resources": "Third-Party Information Resources",
    }

    # Replace long descriptions with short names - use contains for partial matching
    if not aux.empty:
        for long_desc, short_name in ksi_short_names.items():
            mask = aux["REQUIREMENTS_DESCRIPTION"].str.contains(
                long_desc, na=False, regex=False
            )
            aux.loc[mask, "REQUIREMENTS_DESCRIPTION"] = short_name

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
@@ -0,0 +1,46 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    # Shorten the long FedRAMP KSI descriptions for better display
    ksi_short_names = {
        "A secure cloud service offering will protect user data, control access, and apply zero trust principles": "Identity and Access Management",
        "A secure cloud service offering will use cloud native architecture and design principles to enforce and enhance the Confidentiality, Integrity and Availability of the system": "Cloud Native Architecture",
        "A secure cloud service provider will ensure that all system changes are properly documented and configuration baselines are updated accordingly": "Change Management",
        "A secure cloud service provider will continuously educate their employees on cybersecurity measures, testing them regularly": "Cybersecurity Education",
        "A secure cloud service offering will document, report, and analyze security incidents to ensure regulatory compliance and continuous security improvement": "Incident Reporting",
        "A secure cloud service offering will monitor, log, and audit all important events, activity, and changes": "Monitoring, Logging, and Auditing",
        "A secure cloud service offering will have intentional, organized, universal guidance for how every information resource, including personnel, is secured": "Policy and Inventory",
        "A secure cloud service offering will define, maintain, and test incident response plan(s) and recovery capabilities to ensure minimal service disruption and data loss": "Recovery Planning",
        "A secure cloud service offering will follow FedRAMP encryption policies, continuously verify information resource integrity, and restrict access to third-party information resources": "Service Configuration",
        "A secure cloud service offering will understand, monitor, and manage supply chain risks from third-party information resources": "Third-Party Information Resources",
    }

    # Replace long descriptions with short names - use contains for partial matching
    if not aux.empty:
        for long_desc, short_name in ksi_short_names.items():
            mask = aux["REQUIREMENTS_DESCRIPTION"].str.contains(
                long_desc, na=False, regex=False
            )
            aux.loc[mask, "REQUIREMENTS_DESCRIPTION"] = short_name

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
@@ -0,0 +1,46 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    # Shorten the long FedRAMP KSI descriptions for better display
    ksi_short_names = {
        "A secure cloud service offering will protect user data, control access, and apply zero trust principles": "Identity and Access Management",
        "A secure cloud service offering will use cloud native architecture and design principles to enforce and enhance the Confidentiality, Integrity and Availability of the system": "Cloud Native Architecture",
        "A secure cloud service provider will ensure that all system changes are properly documented and configuration baselines are updated accordingly": "Change Management",
        "A secure cloud service provider will continuously educate their employees on cybersecurity measures, testing them regularly": "Cybersecurity Education",
        "A secure cloud service offering will document, report, and analyze security incidents to ensure regulatory compliance and continuous security improvement": "Incident Reporting",
        "A secure cloud service offering will monitor, log, and audit all important events, activity, and changes": "Monitoring, Logging, and Auditing",
        "A secure cloud service offering will have intentional, organized, universal guidance for how every information resource, including personnel, is secured": "Policy and Inventory",
        "A secure cloud service offering will define, maintain, and test incident response plan(s) and recovery capabilities to ensure minimal service disruption and data loss": "Recovery Planning",
        "A secure cloud service offering will follow FedRAMP encryption policies, continuously verify information resource integrity, and restrict access to third-party information resources": "Service Configuration",
        "A secure cloud service offering will understand, monitor, and manage supply chain risks from third-party information resources": "Third-Party Information Resources",
    }

    # Replace long descriptions with short names - use contains for partial matching
    if not aux.empty:
        for long_desc, short_name in ksi_short_names.items():
            mask = aux["REQUIREMENTS_DESCRIPTION"].str.contains(
                long_desc, na=False, regex=False
            )
            aux.loc[mask, "REQUIREMENTS_DESCRIPTION"] = short_name

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
@@ -0,0 +1,28 @@
import warnings

from dashboard.common_methods import get_section_containers_threatscore

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_threatscore(
        aux,
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
        "REQUIREMENTS_ID",
    )
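Since `str.contains(..., regex=False)` does plain substring matching, the KSI replacement above still fires when the exporter prefixes the description with an identifier or appends punctuation. A small self-contained illustration (hypothetical data; assumes pandas is available):

```python
import pandas as pd

long_desc = (
    "A secure cloud service offering will protect user data, "
    "control access, and apply zero trust principles"
)
df = pd.DataFrame(
    {
        "REQUIREMENTS_DESCRIPTION": [
            f"KSI-IAM: {long_desc}.",  # prefixed/suffixed text still matches
            None,  # na=False keeps NaN rows out of the mask instead of raising
        ]
    }
)
mask = df["REQUIREMENTS_DESCRIPTION"].str.contains(long_desc, na=False, regex=False)
df.loc[mask, "REQUIREMENTS_DESCRIPTION"] = "Identity and Access Management"
print(df["REQUIREMENTS_DESCRIPTION"].tolist())
# ['Identity and Access Management', None]
```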
@@ -1,6 +1,7 @@
services:
  api-dev:
    hostname: "prowler-api"
    # image: prowler-api-dev
    build:
      context: ./api
      dockerfile: Dockerfile
@@ -24,6 +25,8 @@ services:
        condition: service_healthy
      valkey:
        condition: service_healthy
      neo4j:
        condition: service_healthy
    entrypoint:
      - "/home/prowler/docker-entrypoint.sh"
      - "dev"
@@ -78,7 +81,41 @@ services:
      timeout: 5s
      retries: 3

  neo4j:
    image: graphstack/dozerdb:5.26.3.0
    hostname: "neo4j"
    volumes:
      - ./_data/neo4j:/data
    environment:
      # We can't add our .env file because some of our current variables are not compatible with Neo4j env vars
      # Auth
      - NEO4J_AUTH=${NEO4J_USER}/${NEO4J_PASSWORD}
      # Memory limits
      - NEO4J_dbms_max__databases=${NEO4J_DBMS_MAX__DATABASES:-1000000}
      - NEO4J_server_memory_pagecache_size=${NEO4J_SERVER_MEMORY_PAGECACHE_SIZE:-1G}
      - NEO4J_server_memory_heap_initial__size=${NEO4J_SERVER_MEMORY_HEAP_INITIAL__SIZE:-1G}
      - NEO4J_server_memory_heap_max__size=${NEO4J_SERVER_MEMORY_HEAP_MAX__SIZE:-1G}
      # APOC
      - apoc.export.file.enabled=${NEO4J_APOC_EXPORT_FILE_ENABLED:-true}
      - apoc.import.file.enabled=${NEO4J_APOC_IMPORT_FILE_ENABLED:-true}
      - apoc.import.file.use_neo4j_config=${NEO4J_APOC_IMPORT_FILE_USE_NEO4J_CONFIG:-true}
      - "NEO4J_PLUGINS=${NEO4J_PLUGINS:-[\"apoc\"]}"
      - "NEO4J_dbms_security_procedures_allowlist=${NEO4J_DBMS_SECURITY_PROCEDURES_ALLOWLIST:-apoc.*}"
      - "NEO4J_dbms_security_procedures_unrestricted=${NEO4J_DBMS_SECURITY_PROCEDURES_UNRESTRICTED:-apoc.*}"
      # Networking
      - "dbms.connector.bolt.listen_address=${NEO4J_DBMS_CONNECTOR_BOLT_LISTEN_ADDRESS:-0.0.0.0:7687}"
    # 7474 is the UI port
    ports:
      - 7474:7474
      - ${NEO4J_PORT:-7687}:7687
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "http://localhost:7474"]
      interval: 10s
      timeout: 10s
      retries: 10

  worker-dev:
    # image: prowler-api-dev
    build:
      context: ./api
      dockerfile: Dockerfile
@@ -89,17 +126,23 @@ services:
      - path: .env
        required: false
    volumes:
      - ./api/src/backend:/home/prowler/backend
      - ./api/pyproject.toml:/home/prowler/pyproject.toml
      - ./api/docker-entrypoint.sh:/home/prowler/docker-entrypoint.sh
      - outputs:/tmp/prowler_api_output
    depends_on:
      valkey:
        condition: service_healthy
      postgres:
        condition: service_healthy
      neo4j:
        condition: service_healthy
    entrypoint:
      - "/home/prowler/docker-entrypoint.sh"
      - "worker"

  worker-beat:
    # image: prowler-api-dev
    build:
      context: ./api
      dockerfile: Dockerfile
@@ -114,6 +157,8 @@ services:
        condition: service_healthy
      postgres:
        condition: service_healthy
      neo4j:
        condition: service_healthy
    entrypoint:
      - "../docker-entrypoint.sh"
      - "beat"

@@ -63,6 +63,37 @@ services:
      timeout: 5s
      retries: 3

  neo4j:
    image: graphstack/dozerdb:5.26.3.0
    hostname: "neo4j"
    volumes:
      - ./_data/neo4j:/data
    environment:
      # We can't add our .env file because some of our current variables are not compatible with Neo4j env vars
      # Auth
      - NEO4J_AUTH=${NEO4J_USER}/${NEO4J_PASSWORD}
      # Memory limits
      - NEO4J_dbms_max__databases=${NEO4J_DBMS_MAX__DATABASES:-1000000}
      - NEO4J_server_memory_pagecache_size=${NEO4J_SERVER_MEMORY_PAGECACHE_SIZE:-1G}
      - NEO4J_server_memory_heap_initial__size=${NEO4J_SERVER_MEMORY_HEAP_INITIAL__SIZE:-1G}
      - NEO4J_server_memory_heap_max__size=${NEO4J_SERVER_MEMORY_HEAP_MAX__SIZE:-1G}
      # APOC
      - apoc.export.file.enabled=${NEO4J_APOC_EXPORT_FILE_ENABLED:-true}
      - apoc.import.file.enabled=${NEO4J_APOC_IMPORT_FILE_ENABLED:-true}
      - apoc.import.file.use_neo4j_config=${NEO4J_APOC_IMPORT_FILE_USE_NEO4J_CONFIG:-true}
      - "NEO4J_PLUGINS=${NEO4J_PLUGINS:-[\"apoc\"]}"
      - "NEO4J_dbms_security_procedures_allowlist=${NEO4J_DBMS_SECURITY_PROCEDURES_ALLOWLIST:-apoc.*}"
      - "NEO4J_dbms_security_procedures_unrestricted=${NEO4J_DBMS_SECURITY_PROCEDURES_UNRESTRICTED:-apoc.*}"
      # Networking
      - "dbms.connector.bolt.listen_address=${NEO4J_DBMS_CONNECTOR_BOLT_LISTEN_ADDRESS:-0.0.0.0:7687}"
    ports:
      - ${NEO4J_PORT:-7687}:7687
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "http://localhost:7474"]
      interval: 10s
      timeout: 10s
      retries: 10

  worker:
    image: prowlercloud/prowler-api:${PROWLER_API_VERSION:-stable}
    env_file:

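For context, anything depending on this `neo4j` service reaches it over Bolt on port 7687 with the `NEO4J_USER`/`NEO4J_PASSWORD` credentials from the environment. A minimal connectivity sketch with the official `neo4j` Python driver (illustrative only; not part of this diff):

```python
import os

from neo4j import GraphDatabase

# Mirrors the NEO4J_* variables the compose services read from the environment.
uri = f"bolt://{os.getenv('NEO4J_HOST', 'neo4j')}:{os.getenv('NEO4J_PORT', '7687')}"
driver = GraphDatabase.driver(
    uri, auth=(os.environ["NEO4J_USER"], os.environ["NEO4J_PASSWORD"])
)
with driver.session() as session:
    # Trivial round-trip to confirm the container is reachable and auth works.
    assert session.run("RETURN 1 AS ok").single()["ok"] == 1
driver.close()
```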
@@ -4,7 +4,26 @@ title: 'Contributing to Documentation'

Prowler documentation is built using [Mintlify](https://www.mintlify.com/docs), allowing contributors to easily add or enhance documentation.

## Documentation Structure

The Prowler documentation is organized into several sections. The main ones are:

- **Getting Started**: Provides an overview of the Prowler platform and its different solutions, including Prowler Cloud/App, Prowler CLI, Prowler MCP Server, Prowler Hub, and Prowler Lighthouse AI. This section helps new users understand which Prowler solution best fits their needs and includes product comparisons.

- **Guides**: Contains practical tutorials and how-to guides organized by product (Prowler Cloud/App, CLI) and provider (AWS, Azure, GCP, Kubernetes, Microsoft 365, GitHub, etc.). This section covers authentication, integrations, compliance, and advanced usage scenarios.

- **Developer Guide**: Documentation for contributors looking to extend Prowler functionality. This includes guides on creating providers, services, checks, output formats, integrations, and compliance frameworks. Provider-specific implementation details and testing strategies are also covered here.

- **Troubleshooting**: Common issues, error messages, and their solutions. This section helps users resolve problems encountered during installation, configuration, or execution.


## AI-Driven Documentation

As mentioned in the [Introduction](/developer-guide/introduction#ai-driven-contributions), we have specialized resources to enhance AI-driven development.

This includes the [AGENTS.md](https://github.com/prowler-cloud/prowler/blob/master/docs/AGENTS.md) file that contains the guidelines and style guide for the AI agents in the Prowler documentation.

## Local Development

<Steps>
  <Step title="Install Mintlify CLI">
@@ -33,10 +52,10 @@ Prowler documentation is built using [Mintlify](https://www.mintlify.com/docs),
  </Step>

  <Step title="Submit Changes">
    Once documentation updates are complete, [submit a pull request for review](/developer-guide/introduction#sending-the-pull-request).

    The Prowler team will assess and merge contributions.
  </Step>
</Steps>

Your efforts help improve Prowler documentation. Thank you for contributing! 🤘

@@ -2,19 +2,70 @@
title: 'Introduction to developing in Prowler'
---

Thanks for your interest in contributing to Prowler!

Prowler can be extended in various ways. This guide provides the different ways to contribute and how to get started.

## Contributing to Prowler

### Review Current Issues
Check out our [GitHub Issues](https://github.com/prowler-cloud/prowler/issues) page for ideas to contribute.
<Columns cols={2}>
  <Card title="Good First Issue" icon="github" href="https://github.com/prowler-cloud/prowler/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22good%20first%20issue%22">
    We tag issues as `good first issue` for new contributors. These are typically well-defined and manageable in scope.
  </Card>
  <Card title="Help Wanted" icon="github" href="https://github.com/prowler-cloud/prowler/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22help%20wanted%22">
    We tag larger issues that require more time to complete as `help wanted`.
  </Card>
</Columns>

### Expand Prowler's Capabilities
Prowler is constantly evolving. Contributions to checks, services, or integrations help improve the tool for everyone. Here is how to get involved:

<Columns cols={2}>
  <Card title="Adding New Checks" icon="shield" href="/developer-guide/checks">
    Want to improve Prowler's detection capabilities for your favorite cloud provider? You can contribute by writing new checks.
  </Card>
  <Card title="Adding New Services" icon="server" href="/developer-guide/services">
    One key service for your favorite cloud provider is missing? Add it to Prowler! Do not forget to include relevant checks to validate functionality.
  </Card>
  <Card title="Adding New Providers" icon="cloud" href="/developer-guide/provider">
    If you would like to extend Prowler to work with a new cloud provider, this typically involves setting up new services and checks to ensure compatibility.
  </Card>
  <Card title="Adding New Output Formats" icon="file" href="/developer-guide/outputs">
    Want to tailor how results are displayed or exported? You can add custom output formats.
  </Card>
  <Card title="Adding New Integrations" icon="link" href="/developer-guide/integrations">
    Prowler can work with other tools and platforms through integrations.
  </Card>
  <Card title="Proposing or Implementing Features" icon="lightbulb" href="https://github.com/prowler-cloud/prowler/issues/new?template=feature-request.yml">
    Propose brand-new features or enhancements to existing ones, or help implement community-requested improvements.
  </Card>
</Columns>

### Improve Documentation
Help make Prowler more accessible by enhancing our documentation, fixing typos, or adding examples/tutorials.

<Columns cols={2}>
  <Card title="Documentation Guide" icon="book" href="/developer-guide/documentation">
    Enhance our documentation, fix typos, or add examples/tutorials.
  </Card>
</Columns>

### Bug Fixes
If you find any issues or bugs, you can report them on the [GitHub Issues](https://github.com/prowler-cloud/prowler/issues) page, and you are welcome to fix them yourself.

<Columns cols={2}>
  <Card title="Report a Bug" icon="bug" href="https://github.com/prowler-cloud/prowler/issues/new?template=bug_report.yml">
    Report or fix issues or bugs.
  </Card>
</Columns>

Remember, our community is here to help! If you need guidance, do not hesitate to ask questions in the issues or join our [<Icon icon="slack" /> Slack workspace](https://goto.prowler.com/slack).


## Setting up your development environment

### Prerequisites

@@ -26,11 +77,11 @@ Before proceeding, ensure the following:

### Forking the Prowler Repository

Fork the Prowler GitHub repository to contribute to Prowler. This allows proposing changes, submitting new features, and fixing bugs. For guidance on forking, refer to the [official GitHub documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo?tool=webui#forking-a-repository).

### Cloning Your Forked Repository

Once your fork is created, clone it using the following commands (replace `<your-github-user>` with your GitHub username):

```
git clone https://github.com/<your-github-user>/prowler
@@ -56,39 +107,7 @@ If your poetry version is below 2.0.0 you must keep using `poetry shell` to acti

In case you have any doubts, consult the [Poetry environment activation guide](https://python-poetry.org/docs/managing-environments/#activating-the-environment).

</Warning>

### Pre-Commit Hooks

@@ -121,6 +140,16 @@ These should have been already installed if `poetry install --with dev` was alre

Additionally, ensure the latest version of [`TruffleHog`](https://github.com/trufflesecurity/trufflehog) is installed to scan for sensitive data in the code. Follow the official [installation guide](https://github.com/trufflesecurity/trufflehog?tab=readme-ov-file#floppy_disk-installation) for setup.

### AI-Driven Contributions

If you are using AI assistants to help with your contributions, Prowler provides specialized resources to enhance AI-driven development:

- **Prowler MCP Server**: The [Prowler MCP Server](/getting-started/products/prowler-mcp) provides AI assistants with access to the entire Prowler ecosystem, including security checks, compliance frameworks, documentation, and more. This enables AI tools to better understand Prowler's architecture and help you create contributions that align with project standards.

- **AGENTS.md Files**: Each component of the Prowler monorepo includes an `AGENTS.md` file that contains specific guidelines for AI agents working on that component. These files provide context about project structure, coding standards, and best practices. When working on a specific component, refer to the relevant `AGENTS.md` file (e.g., `prowler/AGENTS.md`, `ui/AGENTS.md`, `api/AGENTS.md`) to ensure your AI assistant follows the appropriate guidelines.

These resources help ensure that AI-assisted contributions maintain consistency with Prowler's codebase and development practices.

### Dependency Management

All dependencies are listed in the `pyproject.toml` file.
@@ -133,7 +162,7 @@ If you encounter issues when committing to the Prowler repository, use the `--no
</Note>
### Repository Folder Structure

The Prowler codebase layout helps quickly locate where to add new features, checks, or integrations. The following is a high-level overview from the root of the repository:

```
prowler/
@@ -148,7 +177,7 @@ prowler/
├── permissions/        # Permission-related files and policies
├── contrib/            # Community-contributed scripts or modules
├── kubernetes/         # Kubernetes deployment files
├── .github/            # GitHub-related files (workflows, issue templates, etc.)
├── pyproject.toml      # Python project configuration (Poetry)
├── poetry.lock         # Poetry lock file
├── README.md           # Project overview and getting started
@@ -158,19 +187,23 @@ prowler/
└── ...                 # Other supporting files
```

## Sending the Pull Request

When creating or reviewing a pull request in <Icon icon="github" /> [Prowler](https://github.com/prowler-cloud/prowler), follow [this template](https://github.com/prowler-cloud/prowler/blob/master/.github/pull_request_template.md) and fill it with the relevant information:

- **Context** and **Description** of the change: This will help the reviewers to understand the change and the purpose of the pull request.
- **Steps to review**: A detailed description of how to review the change.
- **Checklist**: A mandatory checklist of the things that should be reviewed before merging the pull request.

## Contribution Appreciation

If you enjoy swag, we'd love to thank you for your contribution with laptop stickers or other Prowler merchandise!

To request swag: Share your pull request details in our [Slack workspace](https://goto.prowler.com/slack).

You can also reach out to Toni de la Fuente on [Twitter](https://twitter.com/ToniBlyx)—his DMs are open!

## Testing a Pull Request from a Specific Branch

To test Prowler from a specific branch (for example, to try out changes from a pull request before it is merged), you can use `pipx` to install directly from GitHub:

@@ -179,3 +212,5 @@ pipx install "git+https://github.com/prowler-cloud/prowler.git@branch-name"
```

Replace `branch-name` with the name of the branch you want to test. This will install Prowler in an isolated environment, allowing you to try out the changes safely.

For more details on testing, go to the [Testing section](/developer-guide/unit-testing) of this documentation.
@@ -4,7 +4,7 @@ title: 'Kubernetes Provider'

This page details the [Kubernetes](https://kubernetes.io/) provider implementation in Prowler.

By default, Prowler will audit all namespaces in the Kubernetes cluster accessible by the configured context. To configure it, see the [In-Cluster Execution](/user-guide/providers/kubernetes/getting-started-k8s#in-cluster-execution) or [Non In-Cluster Execution](/user-guide/providers/kubernetes/getting-started-k8s#non-in-cluster-execution) guides.

## Kubernetes Provider Classes Architecture


@@ -52,7 +52,7 @@
        {
          "group": "Prowler Lighthouse AI",
          "pages": [
            "getting-started/products/prowler-lighthouse-ai"
          ]
        },
        {
@@ -109,7 +109,14 @@
            "user-guide/tutorials/prowler-app-jira-integration"
          ]
        },
        {
          "group": "Lighthouse AI",
          "pages": [
            "user-guide/tutorials/prowler-app-lighthouse",
            "user-guide/tutorials/prowler-app-lighthouse-multi-llm"
          ]
        },
        "user-guide/tutorials/prowler-cloud-public-ips",
        {
          "group": "Tutorials",
          "pages": [
@@ -194,8 +201,7 @@
        {
          "group": "Kubernetes",
          "pages": [
            "user-guide/providers/kubernetes/getting-started-k8s",
            "user-guide/providers/kubernetes/misc"
          ]
        },

@@ -4,12 +4,12 @@ title: "Installation"

### Installation

Prowler App offers flexible installation methods tailored to various environments.

Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detailed usage instructions.

<Warning>
Prowler configuration is based on `.env` files. Each Prowler version may differ in that file, so use the `.env` file that corresponds to your version, repository branch, or tag.
</Warning>

<Tabs>
@@ -26,8 +26,6 @@ Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detai
    curl -sLO "https://raw.githubusercontent.com/prowler-cloud/prowler/refs/tags/${VERSION}/.env"
    docker compose up -d
    ```
  </Tab>
  <Tab title="GitHub">
    _Requirements_:
@@ -106,11 +104,13 @@ Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detai
  </Tab>
</Tabs>

### Updating Prowler App

Upgrade Prowler App installation using one of two options:

#### Option 1: Updating the Environment File

Edit the `.env` file and change the version values:

@@ -119,7 +119,7 @@ PROWLER_UI_VERSION="5.9.0"
PROWLER_API_VERSION="5.9.0"
```

#### Option 2: Using Docker Compose Pull

```bash
docker compose pull --policy always
@@ -133,7 +133,7 @@ The `--policy always` flag ensures that Docker pulls the latest images even if t
Everything is preserved, nothing will be deleted after the update.
</Note>

### Troubleshooting Installation Issues

If containers don't start, check logs for errors:

@@ -145,16 +145,16 @@ docker compose logs
docker images | grep prowler
```

If issues are encountered, roll back by changing the `.env` file back to the previous version and running:

```bash
docker compose pull
docker compose up -d
```

### Container Versions

The available versions of Prowler App are the following:

- `latest`: in sync with `master` branch (please note that it is not a stable version)
- `v4-latest`: in sync with `v4` branch (please note that it is not a stable version)

@@ -4,7 +4,7 @@ title: 'Installation'

## Installation

To install Prowler as a Python package, use `Python >= 3.9, <= 3.12`. Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/):

<Tabs>
  <Tab title="pipx">
@@ -41,7 +41,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
    prowler -v
    ```

    To upgrade Prowler to the latest version:

    ```bash
    pip install --upgrade prowler
@@ -54,8 +54,6 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
    * In the command below, change `-v` to your local directory path in order to access the reports.
    * AWS, GCP, Azure and/or Kubernetes credentials

    _Commands_:

    ```bash
@@ -75,7 +73,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).

    _Commands_:

    ```bash
    git clone https://github.com/prowler-cloud/prowler
    cd prowler
    poetry install
@@ -94,7 +92,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).

    _Commands_:

    ```bash
    python3 -m pip install --user pipx
    python3 -m pipx ensurepath
    pipx install prowler
@@ -104,7 +102,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
  <Tab title="Ubuntu">
    _Requirements_:

    * `Ubuntu 23.04` or above. For older Ubuntu versions, check [pipx installation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#__tabbed_1_1) and ensure `Python >= 3.9, <= 3.12` is installed.
    * `Python >= 3.9, <= 3.12`
    * AWS, GCP, Azure and/or Kubernetes credentials

@@ -121,7 +119,7 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
  <Tab title="Brew">
    _Requirements_:

    * `Brew` installed on Mac or Linux
    * AWS, GCP, Azure and/or Kubernetes credentials

    _Commands_:
@@ -171,7 +169,8 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/).
    ```
  </Tab>
</Tabs>

## Container Versions

The available versions of Prowler CLI are the following:


@@ -0,0 +1,180 @@
---
title: 'Overview'
---

import { VersionBadge } from "/snippets/version-badge.mdx"

<VersionBadge version="5.8.0" />

Prowler Lighthouse AI is a Cloud Security Analyst chatbot that helps you understand, prioritize, and remediate security findings in your cloud environments. It's designed to provide security expertise for teams without dedicated resources, acting as your 24/7 virtual cloud security analyst.

<img src="/images/prowler-app/lighthouse-intro.png" alt="Prowler Lighthouse" />

<Card title="Set Up Lighthouse AI" icon="rocket" href="/user-guide/tutorials/prowler-app-lighthouse#set-up">
  Learn how to configure Lighthouse AI with your preferred LLM provider
</Card>

## Capabilities

Prowler Lighthouse AI is designed to be your AI security team member, with capabilities including:

### Natural Language Querying

Ask questions in plain English about your security findings. Examples:

- "What are my highest risk findings?"
- "Show me all S3 buckets with public access."
- "What security issues were found in my production accounts?"

<img src="/images/prowler-app/lighthouse-feature1.png" alt="Natural language querying" />

### Detailed Remediation Guidance

Get tailored step-by-step instructions for fixing security issues:

- Clear explanations of the problem and its impact
- Commands or console steps to implement fixes
- Alternative approaches with different solutions

<img src="/images/prowler-app/lighthouse-feature2.png" alt="Detailed Remediation" />

### Enhanced Context and Analysis

Lighthouse AI can provide additional context to help you understand the findings:

- Explain security concepts related to findings in simple terms
- Provide risk assessments based on your environment and context
- Connect related findings to show broader security patterns

<img src="/images/prowler-app/lighthouse-config.png" alt="Business Context" />

<img src="/images/prowler-app/lighthouse-feature3.png" alt="Contextual Responses" />

## Important Notes

Prowler Lighthouse AI is powerful, but there are limitations:

- **Continuous improvement**: Please report any issues, as the feature may make mistakes or encounter errors, despite extensive testing.
- **Access limitations**: Lighthouse AI can only access data the logged-in user can view. If you can't see certain information, Lighthouse AI can't see it either.
- **NextJS session dependence**: If your Prowler application session expires or logs out, Lighthouse AI will error out. Refresh and log back in to continue.
- **Response quality**: The response quality depends on the selected LLM provider and model. Choose models with strong tool-calling capabilities for best results. We recommend the `gpt-5` model from OpenAI.

### Getting Help

If you encounter issues with Prowler Lighthouse AI or have suggestions for improvements, please [reach out through our Slack channel](https://goto.prowler.com/slack).

### What Data Is Shared to LLM Providers?
|
||||
|
||||
The following API endpoints are accessible to Prowler Lighthouse AI. Data from the following API endpoints could be shared with LLM provider depending on the scope of user's query:
|
||||
|
||||
#### Accessible API Endpoints

**User Management:**

- List all users - `/api/v1/users`
- Retrieve the current user's information - `/api/v1/users/me`

**Provider Management:**

- List all providers - `/api/v1/providers`
- Retrieve data from a provider - `/api/v1/providers/{id}`

**Scan Management:**

- List all scans - `/api/v1/scans`
- Retrieve data from a specific scan - `/api/v1/scans/{id}`

**Resource Management:**

- List all resources - `/api/v1/resources`
- Retrieve data for a resource - `/api/v1/resources/{id}`

**Findings Management:**

- List all findings - `/api/v1/findings`
- Retrieve data from a specific finding - `/api/v1/findings/{id}`
- Retrieve metadata values from findings - `/api/v1/findings/metadata`

**Overview Data:**

- Get aggregated findings data - `/api/v1/overviews/findings`
- Get findings data by severity - `/api/v1/overviews/findings_severity`
- Get aggregated provider data - `/api/v1/overviews/providers`
- Get findings data by service - `/api/v1/overviews/services`

**Compliance Management:**

- List compliance overviews (optionally filter by scan) - `/api/v1/compliance-overviews`
- Retrieve data from a specific compliance overview - `/api/v1/compliance-overviews/{id}`

#### Excluded API Endpoints

Not all Prowler API endpoints are integrated with Lighthouse AI. Some are intentionally excluded for the following reasons:

- LLM providers shouldn't have access to sensitive data (such as provider secrets and other sensitive configuration)
- User queries don't need responses from those API endpoints (e.g., tasks, tenant details, downloading zip files)

**Excluded Endpoints:**

**User Management:**

- List specific user information - `/api/v1/users/{id}`
- List user memberships - `/api/v1/users/{user_pk}/memberships`
- Retrieve membership data from the user - `/api/v1/users/{user_pk}/memberships/{id}`

**Tenant Management:**

- List all tenants - `/api/v1/tenants`
- Retrieve data from a tenant - `/api/v1/tenants/{id}`
- List tenant memberships - `/api/v1/tenants/{tenant_pk}/memberships`
- List all invitations - `/api/v1/tenants/invitations`
- Retrieve data from a tenant invitation - `/api/v1/tenants/invitations/{id}`

**Security and Configuration:**

- List all secrets - `/api/v1/providers/secrets`
- Retrieve data from a secret - `/api/v1/providers/secrets/{id}`
- List all provider groups - `/api/v1/provider-groups`
- Retrieve data from a provider group - `/api/v1/provider-groups/{id}`

**Reports and Tasks:**

- Download zip report - `/api/v1/scans/{id}/report`
- List all tasks - `/api/v1/tasks`
- Retrieve data from a specific task - `/api/v1/tasks/{id}`

**Lighthouse AI Configuration:**

- List LLM providers - `/api/v1/lighthouse/providers`
- Retrieve LLM provider - `/api/v1/lighthouse/providers/{id}`
- List available models - `/api/v1/lighthouse/models`
- Retrieve tenant configuration - `/api/v1/lighthouse/configuration`

<Note>
Agents can only call GET endpoints; they have no access to other HTTP methods.
</Note>
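
Every tool call Lighthouse AI makes against these endpoints is a plain HTTP GET. As a minimal sketch of the kind of request an agent issues — the base URL, token, and filter parameter below are illustrative placeholders, not a documented invocation:

```sh
# Hypothetical read-only request; API_BASE_URL and ACCESS_TOKEN are placeholders.
curl -s \
  -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  -H "Accept: application/vnd.api+json" \
  "${API_BASE_URL}/api/v1/findings?filter[severity]=critical"
```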

## FAQs

**1. Which LLM providers are supported?**

Lighthouse AI supports three providers:

- **OpenAI** - GPT models (GPT-5, GPT-4o, etc.)
- **Amazon Bedrock** - Claude, Llama, Titan, and other models via AWS
- **OpenAI Compatible** - Custom endpoints like OpenRouter, Ollama, or any OpenAI-compatible service

For detailed configuration instructions, see [Using Multiple LLM Providers with Lighthouse](/user-guide/tutorials/prowler-app-lighthouse-multi-llm).

**2. Why a multi-agent supervisor model?**

Context windows are limited. Demo data fits inside the context window, but querying real-world data often exceeds it. With a multi-agent architecture, different agents fetch different slices of data and return only the minimum the supervisor needs, spreading context-window usage across agents.

**3. Is my security data shared with LLM providers?**

Minimal data is shared to generate useful responses. Agents can access security findings and remediation details when needed. Provider secrets are protected by design and cannot be read. The LLM provider credentials configured with Lighthouse AI are only accessible to our NextJS server and are never sent to the LLM providers. Resource metadata (names, tags, account/project IDs, etc.) may be shared with the configured LLM provider based on query requirements.

**4. Can the Lighthouse AI change my cloud environment?**

No. The agent doesn't have tools to make changes, even if the configured cloud provider API keys have permissions to modify resources.

@@ -5,7 +5,7 @@ title: "Overview"

**Prowler MCP Server** brings the entire Prowler ecosystem to AI assistants through the Model Context Protocol (MCP). It enables seamless integration with AI tools like Claude Desktop, Cursor, and other MCP clients, allowing interaction with Prowler's security capabilities through natural language.

<Warning>
**Preview Feature**: This MCP server is currently under active development. Features and functionality may change. We welcome your feedback—please report any issues on [GitHub](https://github.com/prowler-cloud/prowler/issues) or join our [Slack community](https://goto.prowler.com/slack) to discuss and share your thoughts.
</Warning>

## What is the Model Context Protocol?

@@ -42,6 +42,15 @@ Search and retrieve official Prowler documentation:

- **Contextual Results**: Get relevant documentation pages with highlighted snippets.
- **Document Retrieval**: Access complete markdown content of any documentation file.

## MCP Server Architecture

The following diagram illustrates the Prowler MCP Server architecture and its integration points:

<img className="block dark:hidden" src="/images/prowler_mcp_schema_light.png" alt="Prowler MCP Server Schema" />
<img className="hidden dark:block" src="/images/prowler_mcp_schema_dark.png" alt="Prowler MCP Server Schema" />

The architecture shows how AI assistants connect through the MCP protocol to access Prowler's three main components: Prowler Cloud/App for security operations, Prowler Hub for security knowledge, and Prowler Documentation for guidance and reference.

## Use Cases

The Prowler MCP Server enables powerful workflows through AI assistants:

@@ -28,12 +28,12 @@ The supported providers right now are:

| [AWS](/user-guide/providers/aws/getting-started-aws) | Official | UI, API, CLI |
| [Azure](/user-guide/providers/azure/getting-started-azure) | Official | UI, API, CLI |
| [Google Cloud](/user-guide/providers/gcp/getting-started-gcp) | Official | UI, API, CLI |
| [Kubernetes](/user-guide/providers/kubernetes/getting-started-k8s) | Official | UI, API, CLI |
| [M365](/user-guide/providers/microsoft365/getting-started-m365) | Official | UI, API, CLI |
| [GitHub](/user-guide/providers/github/getting-started-github) | Official | UI, API, CLI |
| [Oracle Cloud](/user-guide/providers/oci/getting-started-oci) | Official | UI, API, CLI |
| [Infra as Code](/user-guide/providers/iac/getting-started-iac) | Official | UI, API, CLI |
| [MongoDB Atlas](/user-guide/providers/mongodbatlas/getting-started-mongodbatlas) | Official | UI, API, CLI |
| [LLM](/user-guide/providers/llm/getting-started-llm) | Official | CLI |
| **NHN** | Unofficial | CLI |

@@ -24,7 +24,7 @@ Standard results will be shown and additionally the framework information as the
**If Prowler can't find a resource related to a check from a compliance requirement, that requirement won't appear in the output.**
</Note>

## List Available Compliance Frameworks

To see which compliance frameworks are covered by Prowler, use the `--list-compliance` option:

@@ -34,7 +34,7 @@ prowler <provider> --list-compliance

Or you can visit [Prowler Hub](https://hub.prowler.com/compliance).

## List Requirements of Compliance Frameworks
To list requirements for a compliance framework, use the `--list-compliance-requirements` option:

```sh
prowler <provider> --list-compliance-requirements <compliance_framework(s)>
```

@@ -94,7 +94,7 @@ The following list includes all the Azure checks with configurable variables tha

### Configurable Checks

## Kubernetes

### Configurable Checks
The following list includes all the Kubernetes checks with configurable variables that can be changed in the configuration yaml file:

@@ -2,7 +2,7 @@

title: 'Integrations'
---

## Integration with Slack

Prowler can be integrated with [Slack](https://slack.com/) to send a summary of the execution to a Slack channel where you have configured a Slack App, using the following command:
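
A minimal sketch, assuming a Slack App with a bot token already added to your channel — the environment variable names follow Prowler's Slack tutorial, so verify them against your version:

```sh
# Export the Slack App credentials Prowler reads (names per the Slack tutorial).
export SLACK_API_TOKEN="xoxb-your-bot-token"
export SLACK_CHANNEL_NAME="your-channel"

# Send the execution summary to Slack after the scan.
prowler aws --slack
```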
@@ -14,7 +14,7 @@ prowler <provider> -V/-v/--version

Prowler provides various execution settings.

### Verbose Execution

To enable verbose mode in Prowler, similar to Version 2, use:

@@ -54,7 +54,7 @@ To run Prowler without color formatting:

```sh
prowler <provider> --no-color
```

### Checks in Prowler

Prowler provides various security checks per cloud provider. Use the following options to list, execute, or exclude specific checks:

@@ -96,7 +96,7 @@ prowler <provider> -e/--excluded-checks ec2 rds

```sh
prowler <provider> -C/--checks-file <checks_list>.json
```

## Custom Checks in Prowler

Prowler supports custom security checks, allowing users to define their own logic.
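
For example, custom checks placed in a local folder can be loaded with the `-x`/`--checks-folder` flag (the folder name here is illustrative):

```sh
# Point Prowler at a folder containing your custom checks (path illustrative).
prowler aws --checks-folder ./my_custom_checks
```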
@@ -19,15 +19,45 @@ The Mutelist option works in combination with other filtering mechanisms and mod

## How the Mutelist Works

The **Mutelist** uses **AND logic** to evaluate whether a finding should be muted. For a finding to be muted, **ALL** of the following conditions must match:

- **Account** matches (exact match or `*`)
- **Check** matches (exact match, regex pattern, or `*`)
- **Region** matches (exact match, regex pattern, or `*`)
- **Resource** matches (exact match, regex pattern, or `*`)
- **Tags** match (if specified)

If **any** of these criteria do not match, the finding is **not muted**.

### Tag Matching Logic

Tags have special matching behavior:

- **Multiple tags in the list = AND logic**: ALL tags must be present on the resource
  ```yaml
  Tags:
    - "environment=dev"
    - "team=backend" # BOTH tags required
  ```

- **Regex alternation within a single tag = OR logic**: Use the pipe operator `|` for OR
  ```yaml
  Tags:
    - "environment=dev|environment=stg" # Matches EITHER dev OR stg
  ```

- **Complex tag patterns**: Combine AND and OR using regex
  ```yaml
  Tags:
    - "team=backend" # Required
    - "environment=dev|environment=stg" # AND (dev OR stg)
  ```

<Note>
Remember that the Mutelist supports regular expressions.
</Note>

## Mutelist Specification

<Note>
- For the Azure provider, the Account ID is the Subscription Name and the Region is the Location.
@@ -40,9 +70,10 @@ The Mutelist file uses the [YAML](https://en.wikipedia.org/wiki/YAML) format wit

```yaml
### Account, Check and/or Region can be * to apply for all the cases.
### Resources and tags are lists that can have either Regex or Keywords.
### Multiple tags in the list are "ANDed" together (ALL must match).
### Use regex alternation (|) within a single tag for "OR" logic (e.g., "env=dev|env=stg").
### For each check you can use Exceptions to unmute specific Accounts, Regions, Resources and/or Tags.
### All conditions (Account, Check, Region, Resource, Tags) are ANDed together.
########################### MUTELIST EXAMPLE ###########################
Mutelist:
  Accounts:

@@ -148,11 +179,11 @@ Mutelist:

| Field | Description | Logic |
|----------|----------|----------|
| `account_id` | Use `*` to apply the mutelist to all accounts. Supports exact match or wildcard. | `AND` (with other fields) |
| `check_name` | The name of the Prowler check. Use `*` to apply the mutelist to all checks, or `service_*` to apply it to all of a service's checks. Supports regex patterns. | `AND` (with other fields) |
| `region` | The region identifier. Use `*` to apply the mutelist to all regions. Supports regex patterns. | `AND` (with other fields) |
| `resource` | The resource identifier. Use `*` to apply the mutelist to all resources. Supports regex patterns. | `AND` (with other fields) |
| `tags` | List of tag patterns in `key=value` format. **Multiple tags = AND** (all must match). **Regex alternation within a single tag = OR** (use `tag1\|tag2`). | `AND` between tags, `OR` within regex |

### Description

@@ -173,6 +204,68 @@ Replace `<provider>` with the appropriate provider name.

- The Mutelist can be used in combination with other Prowler options, such as the `--service` or `--checks` option, to further customize the scanning process.
- Make sure to review and update the Mutelist regularly to ensure it reflects the desired exclusions and remains up to date with your infrastructure.

## Current Limitations and Workarounds

### Limitation: No OR Logic Between Different Rule Sets

The current Mutelist schema **does not support OR logic** between different condition sets. Each check can have only **one rule object**, and all conditions are **ANDed** together.

**Example of unsupported scenario:**
```yaml
# ❌ INVALID: Cannot have multiple rule blocks for the same check
Accounts:
  "*":
    Checks:
      "*": # Rule 1
        Regions: ["eu-west-1", "us-west-2"]
        Resources: ["*"]
      "*": # Rule 2 - This will OVERWRITE Rule 1 (YAML duplicate key)
        Regions: ["us-east-1"]
        Tags: ["environment=dev"]
```

**Workaround: Use multiple scans with different mutelists**

For complex scenarios requiring OR logic, run separate scans:

```bash
# Scan 1: Mute findings in non-critical regions
prowler aws --mutelist-file mutelist_noncritical.yaml

# Scan 2: Mute dev/stg in critical regions
prowler aws --mutelist-file mutelist_critical.yaml --region us-east-1 sa-east-1
```

Then merge the outputs in your reporting pipeline.
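
As a sketch — assuming JSON output files and `jq`, with illustrative paths — the two findings arrays can be concatenated:

```sh
# Concatenate the JSON findings arrays from both scans into one file.
jq -s 'add' scan1-findings.json scan2-findings.json > merged-findings.json
```
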
### Limitation: Cannot Negate Regions

You cannot express "all regions **except** X and Y". You must explicitly list all regions you want to mute.

**Workaround:**

```yaml
# Must enumerate all unwanted regions
Accounts:
  "*":
    Checks:
      "*":
        Regions:
          - "af-south-1"
          - "ap-east-1"
          # ... list all regions EXCEPT the ones you want to monitor
        Resources: ["*"]
```
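
To build that region list without typing it by hand, something like the following works — a sketch assuming the AWS CLI is configured, with `us-east-1` and `sa-east-1` standing in for the regions you want to keep monitored:

```sh
# List enabled regions, drop the ones to keep monitoring,
# and emit quoted YAML list items ready to paste under Regions:.
aws ec2 describe-regions --query 'Regions[].RegionName' --output text \
  | tr '\t' '\n' \
  | grep -v -e '^us-east-1$' -e '^sa-east-1$' \
  | sed 's/^/  - "/; s/$/"/'
```
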
### Best Practices

1. **Use regex patterns for flexibility**: Instead of listing multiple resources, use regex patterns like `"dev-.*"` or `"test-instance-[0-9]+"`

2. **Combine tag OR logic with regex**: Use `"environment=dev|environment=stg|environment=test"` instead of multiple tag entries

3. **Be specific with exceptions**: Use the `Exceptions` field to unmute specific resources within a broader muting rule

4. **Test your mutelist**: Run Prowler with `--output-modes json` and verify that the expected findings are muted
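
A short sketch pulling these practices together — the account wildcard, check pattern, and resource/tag patterns are all illustrative:

```yaml
Mutelist:
  Accounts:
    "*":
      Checks:
        "ec2_*": # Pattern matching all EC2 checks
          Regions: ["*"]
          Resources: ["dev-.*"] # Regex instead of listing every resource
          Tags:
            - "environment=dev|environment=stg|environment=test" # OR via alternation
          Exceptions:
            Resources: ["dev-bastion-.*"] # Keep findings for these resources unmuted
```
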
## AWS Mutelist

### Muting specific AWS regions

@@ -10,7 +10,7 @@ This can help for really large accounts, but please be aware of AWS API rate lim

2. **API Rate Limits**: Most of the rate limits in AWS are applied at the API level. Each API call to an AWS service counts towards the rate limit for that service.
3. **Throttling Responses**: When you exceed the rate limit for a service, AWS responds with a throttling error. In AWS SDKs, these are typically represented as `ThrottlingException` or `RateLimitExceeded` errors.

For information on Prowler's retrier configuration please refer to this [page](https://docs.prowler.com/user-guide/providers/aws/boto3-configuration/).

<Note>
You might need to increase the `--aws-retries-max-attempts` parameter from the default value of 3. The retrier follows an exponential backoff strategy.
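
For example (illustrative value):

```sh
# Raise the retrier's maximum attempts from the default of 3.
prowler aws --aws-retries-max-attempts 10
```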
@@ -24,6 +24,6 @@ By default, it extracts resources from all the regions, you could use `-f`/`--fi


## Objections

The inventorying process is carried out with `resourcegroupstaggingapi` calls, which means that only resources that have or have had tags will appear (except for IAM and S3 resources, which are retrieved with Boto3 API calls).