Mirror of https://github.com/prowler-cloud/prowler.git
Synced 2026-05-06 16:58:19 +00:00

Compare commits: v5.22...better-sentry (64 commits)
| SHA1 |
|---|
| d3daeb1d75 |
| 4f86667433 |
| 4bb1e5cff7 |
| 99b80ebbd9 |
| d18c5a8974 |
| ab00c2dce1 |
| 765f9c72f2 |
| de5bb94ff6 |
| c009a2128a |
| 50556df713 |
| 3b875484b0 |
| 442b379777 |
| 2a8b6261e1 |
| 6df74529d6 |
| 6f6d62f51f |
| 7148086410 |
| 4ef0b1bf2c |
| de492a770c |
| e9009f783b |
| db1edf5ca7 |
| 82d3ccec18 |
| ff46281f64 |
| 94e234cefb |
| 8267fc4813 |
| 8bfeee238b |
| cc197ea901 |
| 2b5d015e09 |
| 73e0ac6892 |
| 700b51ddad |
| 417be55604 |
| f75ce7b4dd |
| 269d9dfe41 |
| 7b0ce7842b |
| 0a11ca4a68 |
| c953fa7e67 |
| 73907db856 |
| 041f95b3df |
| 716c130140 |
| c651f60e3a |
| dd00d71a07 |
| 834d1bca49 |
| 2cf45c72b6 |
| 213e18724d |
| 571141f57c |
| 45f0909c3e |
| b01fcc6cb2 |
| 2ddd5b3091 |
| 6100932c60 |
| 1c2b146e6e |
| 833f3779ef |
| c752811666 |
| 4d1f7626f9 |
| 9bf2a13177 |
| d15e67e2e5 |
| 20cf5562b8 |
| 36279f694c |
| c991a1d0e8 |
| aa3641718b |
| bb80797392 |
| 435624fcd4 |
| 9e67f31913 |
| 0984cfd75b |
| c1044ef491 |
| 19c4c9251c |
@@ -78,6 +78,9 @@ TASK_RETRY_ATTEMPTS=5

# Valkey settings
# If running Valkey and celery on host, use localhost, else use 'valkey'
VALKEY_SCHEME=redis
VALKEY_USERNAME=
VALKEY_PASSWORD=
VALKEY_HOST=valkey
VALKEY_PORT=6379
VALKEY_DB=0

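These variables describe the Valkey (Redis-compatible) connection used by the API and Celery. As a rough illustration only (the helper below is hypothetical, not taken from the Prowler codebase), they could be assembled into a broker URL like this:

```python
import os


def valkey_broker_url() -> str:
    """Build a Celery-style broker URL from the VALKEY_* variables above.

    Sketch under the assumption that an empty username/password means
    unauthenticated access; Prowler's actual wiring may differ.
    """
    scheme = os.getenv("VALKEY_SCHEME", "redis")
    user = os.getenv("VALKEY_USERNAME", "")
    password = os.getenv("VALKEY_PASSWORD", "")
    host = os.getenv("VALKEY_HOST", "valkey")
    port = os.getenv("VALKEY_PORT", "6379")
    db = os.getenv("VALKEY_DB", "0")

    auth = f"{user}:{password}@" if user or password else ""
    return f"{scheme}://{auth}{host}:{port}/{db}"


if __name__ == "__main__":
    print(valkey_broker_url())  # e.g. redis://valkey:6379/0
```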
@@ -117,7 +117,10 @@ runs:
INPUTS_IMAGE_TAG: ${{ inputs.image-tag }}

- name: Comment scan results on PR
if: inputs.create-pr-comment == 'true' && github.event_name == 'pull_request'
if: >-
inputs.create-pr-comment == 'true'
&& github.event_name == 'pull_request'
&& github.event.pull_request.head.repo.full_name == github.repository
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
env:
IMAGE_NAME: ${{ inputs.image-name }}

@@ -67,6 +67,11 @@ provider/googleworkspace:
- any-glob-to-any-file: "prowler/providers/googleworkspace/**"
- any-glob-to-any-file: "tests/providers/googleworkspace/**"

provider/vercel:
- changed-files:
- any-glob-to-any-file: "prowler/providers/vercel/**"
- any-glob-to-any-file: "tests/providers/vercel/**"

github_actions:
- changed-files:
- any-glob-to-any-file: ".github/workflows/*"

@@ -102,6 +107,8 @@ mutelist:
- any-glob-to-any-file: "tests/providers/openstack/lib/mutelist/**"
- any-glob-to-any-file: "prowler/providers/googleworkspace/lib/mutelist/**"
- any-glob-to-any-file: "tests/providers/googleworkspace/lib/mutelist/**"
- any-glob-to-any-file: "prowler/providers/vercel/lib/mutelist/**"
- any-glob-to-any-file: "tests/providers/vercel/lib/mutelist/**"

integration/s3:
- changed-files:

@@ -177,6 +177,14 @@ modules:
- tests/providers/llm/**
e2e: []

- name: sdk-vercel
match:
- prowler/providers/vercel/**
- prowler/compliance/vercel/**
tests:
- tests/providers/vercel/**
e2e: []

# ============================================
# SDK - Lib modules
# ============================================

@@ -27,6 +27,11 @@ jobs:
|
||||
patch_version: ${{ steps.detect.outputs.patch_version }}
|
||||
current_api_version: ${{ steps.get_api_version.outputs.current_api_version }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -79,6 +84,11 @@ jobs:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -204,6 +214,11 @@ jobs:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -32,6 +32,16 @@ jobs:
|
||||
working-directory: ./api
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
api.github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -41,6 +41,18 @@ jobs:
|
||||
- 'python'
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
uploads.github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
objects.githubusercontent.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -18,9 +18,6 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
@@ -43,7 +40,14 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
short-sha: ${{ steps.set-short-sha.outputs.short-sha }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
|
||||
- name: Calculate short SHA
|
||||
id: set-short-sha
|
||||
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
|
||||
@@ -55,7 +59,14 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -94,6 +105,26 @@ jobs:
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
_http._tcp.deb.debian.org:443
|
||||
aka.ms:443
|
||||
auth.docker.io:443
|
||||
cdn.powershellgallery.com:443
|
||||
dc.services.visualstudio.com:443
|
||||
debian.map.fastlydns.net:80
|
||||
files.pythonhosted.org:443
|
||||
github.com:443
|
||||
powershellinfraartifacts-gkhedzdeaghdezhr.z01.azurefd.net:443
|
||||
production.cloudflare.docker.com:443
|
||||
pypi.org:443
|
||||
registry-1.docker.io:443
|
||||
release-assets.githubusercontent.com:443
|
||||
www.powershellgallery.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -132,17 +163,26 @@ jobs:
|
||||
needs: [setup, container-build-push]
|
||||
if: always() && needs.setup.result == 'success' && needs.container-build-push.result == 'success'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
||||
|
||||
- name: Create and push manifests for push event
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
@@ -184,7 +224,14 @@ jobs:
|
||||
needs: [setup, notify-release-started, container-build-push, create-manifest]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -227,6 +274,13 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
|
||||
- name: Trigger API deployment
|
||||
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
|
||||
with:
|
||||
|
||||
@@ -27,6 +27,13 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -65,6 +72,30 @@ jobs:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
mirror.gcr.io:443
|
||||
check.trivy.dev:443
|
||||
github.com:443
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
debian.map.fastlydns.net:80
|
||||
release-assets.githubusercontent.com:443
|
||||
objects.githubusercontent.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
www.powershellgallery.com:443
|
||||
aka.ms:443
|
||||
cdn.powershellgallery.com:443
|
||||
_http._tcp.deb.debian.org:443
|
||||
powershellinfraartifacts-gkhedzdeaghdezhr.z01.azurefd.net:443
|
||||
get.trivy.dev:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -32,6 +32,19 @@ jobs:
|
||||
working-directory: ./api
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
github.com:443
|
||||
auth.safetycli.com:443
|
||||
pyup.io:443
|
||||
data.safetycli.com:443
|
||||
api.github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -22,6 +22,9 @@ env:
|
||||
POSTGRES_USER: prowler_user
|
||||
POSTGRES_PASSWORD: prowler
|
||||
POSTGRES_DB: postgres-db
|
||||
VALKEY_SCHEME: redis
|
||||
VALKEY_USERNAME: ""
|
||||
VALKEY_PASSWORD: ""
|
||||
VALKEY_HOST: localhost
|
||||
VALKEY_PORT: 6379
|
||||
VALKEY_DB: 0
|
||||
@@ -72,6 +75,22 @@ jobs:
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
cli.codecov.io:443
|
||||
keybase.io:443
|
||||
ingest.codecov.io:443
|
||||
storage.googleapis.com:443
|
||||
o26192.ingest.us.sentry.io:443
|
||||
api.github.com:443
|
||||
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -27,6 +27,13 @@ jobs:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
|
||||
- name: Check labels
|
||||
id: label_check
|
||||
uses: agilepathway/label-checker@c3d16ad512e7cea5961df85ff2486bb774caf3c5 # v1.6.65
|
||||
|
||||
@@ -33,6 +33,16 @@ jobs:
|
||||
actions: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
ghcr.io:443
|
||||
pkg-containers.githubusercontent.com:443
|
||||
api.github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -19,6 +19,11 @@ jobs:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Remove 'status/awaiting-response' label
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
@@ -25,6 +25,11 @@ jobs:
|
||||
pull-requests: read
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Check PR title format
|
||||
uses: agenthunt/conventional-commit-checker-action@f1823f632e95a64547566dcd2c7da920e67117ad # v2.0.1
|
||||
with:
|
||||
|
||||
@@ -22,6 +22,11 @@ jobs:
|
||||
issues: write
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Create backport label for minor releases
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
@@ -27,6 +27,11 @@ jobs:
|
||||
patch_version: ${{ steps.detect.outputs.patch_version }}
|
||||
current_docs_version: ${{ steps.get_docs_version.outputs.current_docs_version }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -79,6 +84,11 @@ jobs:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -204,6 +214,11 @@ jobs:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -22,6 +22,15 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
ghcr.io:443
|
||||
pkg-containers.githubusercontent.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -30,6 +30,11 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
|
||||
@@ -23,6 +23,11 @@ jobs:
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
|
||||
@@ -65,6 +65,11 @@ jobs:
|
||||
text: ${{ steps.compute-text.outputs.text }}
|
||||
title: ${{ steps.compute-text.outputs.title }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@9382be3ca9ac18917e111a99d4e6bbff58d0dccc # v0.43.23
|
||||
with:
|
||||
@@ -129,6 +134,11 @@ jobs:
|
||||
output_types: ${{ steps.collect_output.outputs.output_types }}
|
||||
secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@9382be3ca9ac18917e111a99d4e6bbff58d0dccc # v0.43.23
|
||||
with:
|
||||
@@ -859,6 +869,11 @@ jobs:
|
||||
tools_reported: ${{ steps.missing_tool.outputs.tools_reported }}
|
||||
total_count: ${{ steps.missing_tool.outputs.total_count }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@9382be3ca9ac18917e111a99d4e6bbff58d0dccc # v0.43.23
|
||||
with:
|
||||
@@ -966,6 +981,11 @@ jobs:
|
||||
outputs:
|
||||
success: ${{ steps.parse_results.outputs.success }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@9382be3ca9ac18917e111a99d4e6bbff58d0dccc # v0.43.23
|
||||
with:
|
||||
@@ -1070,6 +1090,11 @@ jobs:
|
||||
outputs:
|
||||
activated: ${{ (steps.check_membership.outputs.is_team_member == 'true') && (steps.check_rate_limit.outputs.rate_limit_ok == 'true') }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@9382be3ca9ac18917e111a99d4e6bbff58d0dccc # v0.43.23
|
||||
with:
|
||||
@@ -1138,6 +1163,11 @@ jobs:
|
||||
process_safe_outputs_processed_count: ${{ steps.process_safe_outputs.outputs.processed_count }}
|
||||
process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@9382be3ca9ac18917e111a99d4e6bbff58d0dccc # v0.43.23
|
||||
with:
|
||||
|
||||
@@ -24,6 +24,11 @@ jobs:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Apply labels to PR
|
||||
uses: actions/labeler@634933edcd8ababfe52f92936142cc22ac488b1b # v6.0.1
|
||||
with:
|
||||
@@ -38,6 +43,11 @@ jobs:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Check if author is org member
|
||||
id: check_membership
|
||||
env:
|
||||
@@ -65,7 +75,7 @@ jobs:
|
||||
"RosaRivasProwler"
|
||||
"StylusFrost"
|
||||
"toniblyx"
|
||||
"vicferpoy"
|
||||
"davidm4r"
|
||||
)
|
||||
|
||||
echo "Checking if $AUTHOR is a member of prowler-cloud organization"
|
||||
|
||||
@@ -17,9 +17,6 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
@@ -42,7 +39,14 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
short-sha: ${{ steps.set-short-sha.outputs.short-sha }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
|
||||
- name: Calculate short SHA
|
||||
id: set-short-sha
|
||||
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
|
||||
@@ -54,7 +58,14 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -92,6 +103,20 @@ jobs:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
ghcr.io:443
|
||||
pkg-containers.githubusercontent.com:443
|
||||
files.pythonhosted.org:443
|
||||
pypi.org:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -132,17 +157,27 @@ jobs:
|
||||
needs: [setup, container-build-push]
|
||||
if: always() && needs.setup.result == 'success' && needs.container-build-push.result == 'success'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
||||
|
||||
- name: Create and push manifests for push event
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
@@ -184,7 +219,14 @@ jobs:
|
||||
needs: [setup, notify-release-started, container-build-push, create-manifest]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -227,6 +269,13 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
|
||||
- name: Trigger MCP deployment
|
||||
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
|
||||
with:
|
||||
|
||||
@@ -27,6 +27,13 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -64,6 +71,23 @@ jobs:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
ghcr.io:443
|
||||
pkg-containers.githubusercontent.com:443
|
||||
files.pythonhosted.org:443
|
||||
pypi.org:443
|
||||
api.github.com:443
|
||||
mirror.gcr.io:443
|
||||
check.trivy.dev:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -26,6 +26,11 @@ jobs:
|
||||
major_version: ${{ steps.parse-version.outputs.major }}
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Parse and validate version
|
||||
id: parse-version
|
||||
run: |
|
||||
@@ -59,13 +64,18 @@ jobs:
|
||||
url: https://pypi.org/project/prowler-mcp/
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7
|
||||
uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1
|
||||
with:
|
||||
enable-cache: false
|
||||
|
||||
|
||||
@@ -28,6 +28,14 @@ jobs:
|
||||
MONITORED_FOLDERS: 'api ui prowler mcp_server'
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -0,0 +1,180 @@
|
||||
name: 'Tools: Check Compliance Mapping'
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- 'opened'
|
||||
- 'synchronize'
|
||||
- 'reopened'
|
||||
- 'labeled'
|
||||
- 'unlabeled'
|
||||
branches:
|
||||
- 'master'
|
||||
- 'v5.*'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
check-compliance-mapping:
|
||||
if: contains(github.event.pull_request.labels.*.name, 'no-compliance-check') == false
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
# zizmor: ignore[artipacked]
|
||||
persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
|
||||
with:
|
||||
files: |
|
||||
prowler/providers/**/services/**/*.metadata.json
|
||||
prowler/compliance/**/*.json
|
||||
|
||||
- name: Check if new checks are mapped in compliance
|
||||
id: compliance-check
|
||||
run: |
|
||||
ADDED_METADATA="${STEPS_CHANGED_FILES_OUTPUTS_ADDED_FILES}"
|
||||
ALL_CHANGED="${STEPS_CHANGED_FILES_OUTPUTS_ALL_CHANGED_FILES}"
|
||||
|
||||
# Filter only new metadata files (new checks)
|
||||
new_checks=""
|
||||
for f in $ADDED_METADATA; do
|
||||
case "$f" in *.metadata.json) new_checks="$new_checks $f" ;; esac
|
||||
done
|
||||
|
||||
if [ -z "$(echo "$new_checks" | tr -d ' ')" ]; then
|
||||
echo "No new checks detected."
|
||||
echo "has_new_checks=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Collect compliance files changed in this PR
|
||||
changed_compliance=""
|
||||
for f in $ALL_CHANGED; do
|
||||
case "$f" in prowler/compliance/*.json) changed_compliance="$changed_compliance $f" ;; esac
|
||||
done
|
||||
|
||||
UNMAPPED=""
|
||||
MAPPED=""
|
||||
|
||||
for metadata_file in $new_checks; do
|
||||
check_dir=$(dirname "$metadata_file")
|
||||
check_id=$(basename "$check_dir")
|
||||
provider=$(echo "$metadata_file" | cut -d'/' -f3)
|
||||
|
||||
# Read CheckID from the metadata JSON for accuracy
|
||||
if [ -f "$metadata_file" ]; then
|
||||
json_check_id=$(python3 -c "import json; print(json.load(open('$metadata_file')).get('CheckID', ''))" 2>/dev/null || echo "")
|
||||
if [ -n "$json_check_id" ]; then
|
||||
check_id="$json_check_id"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Search for the check ID in compliance files changed in this PR
|
||||
found_in=""
|
||||
for comp_file in $changed_compliance; do
|
||||
if grep -q "\"${check_id}\"" "$comp_file" 2>/dev/null; then
|
||||
found_in="${found_in}$(basename "$comp_file" .json), "
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -n "$found_in" ]; then
|
||||
found_in=$(echo "$found_in" | sed 's/, $//')
|
||||
MAPPED="${MAPPED}- \`${check_id}\` (\`${provider}\`): ${found_in}"$'\n'
|
||||
else
|
||||
UNMAPPED="${UNMAPPED}- \`${check_id}\` (\`${provider}\`)"$'\n'
|
||||
fi
|
||||
done
|
||||
|
||||
echo "has_new_checks=true" >> "$GITHUB_OUTPUT"
|
||||
|
||||
if [ -n "$UNMAPPED" ]; then
|
||||
echo "has_unmapped=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "has_unmapped=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
{
|
||||
echo "unmapped<<EOF"
|
||||
echo -e "${UNMAPPED}"
|
||||
echo "EOF"
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
|
||||
{
|
||||
echo "mapped<<EOF"
|
||||
echo -e "${MAPPED}"
|
||||
echo "EOF"
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
env:
|
||||
STEPS_CHANGED_FILES_OUTPUTS_ADDED_FILES: ${{ steps.changed-files.outputs.added_files }}
|
||||
STEPS_CHANGED_FILES_OUTPUTS_ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
|
||||
|
||||
- name: Manage compliance review label
|
||||
if: steps.compliance-check.outputs.has_new_checks == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
HAS_UNMAPPED: ${{ steps.compliance-check.outputs.has_unmapped }}
|
||||
run: |
|
||||
LABEL_NAME="needs-compliance-review"
|
||||
|
||||
if [ "$HAS_UNMAPPED" = "true" ]; then
|
||||
echo "Adding compliance review label to PR #${PR_NUMBER}..."
|
||||
gh pr edit "$PR_NUMBER" --add-label "$LABEL_NAME" --repo "${{ github.repository }}" || true
|
||||
else
|
||||
echo "Removing compliance review label from PR #${PR_NUMBER}..."
|
||||
gh pr edit "$PR_NUMBER" --remove-label "$LABEL_NAME" --repo "${{ github.repository }}" || true
|
||||
fi
|
||||
|
||||
- name: Find existing compliance comment
|
||||
if: steps.compliance-check.outputs.has_new_checks == 'true' && github.event.pull_request.head.repo.full_name == github.repository
|
||||
id: find-comment
|
||||
uses: peter-evans/find-comment@b30e6a3c0ed37e7c023ccd3f1db5c6c0b0c23aad # v4.0.0
|
||||
with:
|
||||
issue-number: ${{ github.event.pull_request.number }}
|
||||
comment-author: 'github-actions[bot]'
|
||||
body-includes: '<!-- compliance-mapping-check -->'
|
||||
|
||||
- name: Create or update compliance comment
|
||||
if: steps.compliance-check.outputs.has_new_checks == 'true' && github.event.pull_request.head.repo.full_name == github.repository
|
||||
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
|
||||
with:
|
||||
issue-number: ${{ github.event.pull_request.number }}
|
||||
comment-id: ${{ steps.find-comment.outputs.comment-id }}
|
||||
edit-mode: replace
|
||||
body: |
|
||||
<!-- compliance-mapping-check -->
|
||||
## Compliance Mapping Review
|
||||
|
||||
This PR adds new checks. Please verify that they have been mapped to the relevant compliance framework requirements.
|
||||
|
||||
${{ steps.compliance-check.outputs.unmapped != '' && format('### New checks not mapped to any compliance framework in this PR
|
||||
|
||||
{0}
|
||||
|
||||
> Please review whether these checks should be added to compliance framework requirements in `prowler/compliance/<provider>/`. Each compliance JSON has a `Checks` array inside each requirement — add the check ID there if it satisfies that requirement.', steps.compliance-check.outputs.unmapped) || '' }}
|
||||
|
||||
${{ steps.compliance-check.outputs.mapped != '' && format('### New checks already mapped in this PR
|
||||
|
||||
{0}', steps.compliance-check.outputs.mapped) || '' }}
|
||||
|
||||
Use the `no-compliance-check` label to skip this check.
|
||||
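The compliance-mapping step above greps each new CheckID against the compliance JSONs changed in the PR; the PR comment explains that every compliance JSON keeps a `Checks` array inside each requirement. As a minimal Python sketch of that lookup (the `Requirements` and `Id` key names here are assumptions; the workflow itself only relies on the `Checks` array and the `CheckID` field):

```python
import json
import sys


def requirements_covering(check_id: str, compliance_path: str) -> list:
    """Return the requirement IDs in one compliance file that list check_id.

    Assumes a shape like {"Requirements": [{"Id": ..., "Checks": [...]}]};
    Prowler's real compliance schema may name the outer keys differently.
    """
    with open(compliance_path) as f:
        framework = json.load(f)

    covered = []
    for requirement in framework.get("Requirements", []):
        if check_id in requirement.get("Checks", []):
            covered.append(str(requirement.get("Id", "unknown")))
    return covered


if __name__ == "__main__":
    # Usage: python map_check.py <check_id> <compliance_json>
    print(requirements_covering(sys.argv[1], sys.argv[2]))
```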
@@ -25,6 +25,11 @@ jobs:
|
||||
issues: write
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout PR head
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -23,6 +23,13 @@ jobs:
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
|
||||
- name: Calculate short commit SHA
|
||||
id: vars
|
||||
run: |
|
||||
|
||||
@@ -26,6 +26,11 @@ jobs:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -26,6 +26,11 @@ jobs:
|
||||
minor_version: ${{ steps.detect.outputs.minor_version }}
|
||||
patch_version: ${{ steps.detect.outputs.patch_version }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Detect release type and parse version
|
||||
id: detect
|
||||
run: |
|
||||
@@ -66,6 +71,11 @@ jobs:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -175,6 +185,11 @@ jobs:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -19,6 +19,13 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -24,12 +24,20 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- '3.9'
|
||||
- '3.10'
|
||||
- '3.11'
|
||||
- '3.12'
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -48,6 +48,16 @@ jobs:
|
||||
- 'python'
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
uploads.github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -23,9 +23,6 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
@@ -45,6 +42,7 @@ env:
|
||||
# Container registries
|
||||
PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
|
||||
PROWLERCLOUD_DOCKERHUB_IMAGE: prowler
|
||||
TONIBLYX_DOCKERHUB_REPOSITORY: toniblyx
|
||||
|
||||
# AWS configuration (for ECR)
|
||||
AWS_REGION: us-east-1
|
||||
@@ -59,7 +57,18 @@ jobs:
|
||||
prowler_version_major: ${{ steps.get-prowler-version.outputs.prowler_version_major }}
|
||||
latest_tag: ${{ steps.get-prowler-version.outputs.latest_tag }}
|
||||
stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -115,7 +124,14 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -154,6 +170,27 @@ jobs:
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.ecr-public.us-east-1.amazonaws.com:443
|
||||
public.ecr.aws:443
|
||||
registry-1.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
auth.docker.io:443
|
||||
debian.map.fastlydns.net:80
|
||||
github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
www.powershellgallery.com:443
|
||||
aka.ms:443
|
||||
cdn.powershellgallery.com:443
|
||||
_http._tcp.deb.debian.org:443
|
||||
powershellinfraartifacts-gkhedzdeaghdezhr.z01.azurefd.net:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -196,8 +233,24 @@ jobs:
|
||||
needs: [setup, container-build-push]
|
||||
if: always() && needs.setup.result == 'success' && needs.container-build-push.result == 'success'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
public.ecr.aws:443
|
||||
production.cloudflare.docker.com:443
|
||||
github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
api.ecr-public.us-east-1.amazonaws.com:443
|
||||
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
@@ -213,15 +266,11 @@ jobs:
|
||||
env:
|
||||
AWS_REGION: ${{ env.AWS_REGION }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
||||
|
||||
- name: Create and push manifests for push event
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_LATEST_TAG} \
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_LATEST_TAG} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_LATEST_TAG} \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_LATEST_TAG}-amd64 \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_LATEST_TAG}-arm64
|
||||
@@ -232,12 +281,10 @@ jobs:
|
||||
if: github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${NEEDS_SETUP_OUTPUTS_PROWLER_VERSION} \
|
||||
-t ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${NEEDS_SETUP_OUTPUTS_STABLE_TAG} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${NEEDS_SETUP_OUTPUTS_PROWLER_VERSION} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${NEEDS_SETUP_OUTPUTS_STABLE_TAG} \
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_PROWLER_VERSION} \
|
||||
-t ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_STABLE_TAG} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${NEEDS_SETUP_OUTPUTS_PROWLER_VERSION} \
|
||||
-t ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${NEEDS_SETUP_OUTPUTS_STABLE_TAG} \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_LATEST_TAG}-amd64 \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_LATEST_TAG}-arm64
|
||||
env:
|
||||
@@ -245,6 +292,39 @@ jobs:
|
||||
NEEDS_SETUP_OUTPUTS_STABLE_TAG: ${{ needs.setup.outputs.stable_tag }}
|
||||
NEEDS_SETUP_OUTPUTS_LATEST_TAG: ${{ needs.setup.outputs.latest_tag }}
|
||||
|
||||
# Push to toniblyx/prowler only for current version (latest/stable/release tags)
|
||||
- name: Login to DockerHub (toniblyx)
|
||||
if: needs.setup.outputs.latest_tag == 'latest'
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
username: ${{ secrets.TONIBLYX_DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.TONIBLYX_DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Push manifests to toniblyx for push event
|
||||
if: needs.setup.outputs.latest_tag == 'latest' && github.event_name == 'push'
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.TONIBLYX_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:latest \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:latest
|
||||
|
||||
- name: Push manifests to toniblyx for release event
|
||||
if: needs.setup.outputs.latest_tag == 'latest' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.TONIBLYX_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${NEEDS_SETUP_OUTPUTS_PROWLER_VERSION} \
|
||||
-t ${{ env.TONIBLYX_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:stable \
|
||||
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:stable
|
||||
env:
|
||||
NEEDS_SETUP_OUTPUTS_PROWLER_VERSION: ${{ needs.setup.outputs.prowler_version }}
|
||||
|
||||
# Re-login as prowlercloud for cleanup of intermediate tags
|
||||
- name: Login to DockerHub (prowlercloud)
|
||||
if: always()
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Install regctl
|
||||
if: always()
|
||||
uses: regclient/actions/regctl-installer@da9319db8e44e8b062b3a147e1dfb2f574d41a03 # main
|
||||
@@ -264,7 +344,14 @@ jobs:
|
||||
needs: [setup, notify-release-started, container-build-push, create-manifest]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -307,6 +394,11 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Calculate short SHA
|
||||
id: short-sha
|
||||
run: echo "short_sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
|
||||
|
||||
@@ -26,6 +26,13 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -64,6 +71,29 @@ jobs:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
api.github.com:443
|
||||
mirror.gcr.io:443
|
||||
check.trivy.dev:443
|
||||
debian.map.fastlydns.net:80
|
||||
release-assets.githubusercontent.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
www.powershellgallery.com:443
|
||||
aka.ms:443
|
||||
cdn.powershellgallery.com:443
|
||||
_http._tcp.deb.debian.org:443
|
||||
powershellinfraartifacts-gkhedzdeaghdezhr.z01.azurefd.net:443
|
||||
get.trivy.dev:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -25,6 +25,11 @@ jobs:
|
||||
major_version: ${{ steps.parse-version.outputs.major }}
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Parse and validate version
|
||||
id: parse-version
|
||||
run: |
|
||||
@@ -58,6 +63,11 @@ jobs:
|
||||
url: https://pypi.org/project/prowler/${{ needs.validate-release.outputs.prowler_version }}/
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -91,6 +101,11 @@ jobs:
|
||||
url: https://pypi.org/project/prowler-cloud/${{ needs.validate-release.outputs.prowler_version }}/
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -24,6 +24,11 @@ jobs:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -22,6 +22,11 @@ jobs:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -23,6 +23,19 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
github.com:443
|
||||
auth.safetycli.com:443
|
||||
pyup.io:443
|
||||
data.safetycli.com:443
|
||||
api.github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -24,12 +24,41 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- '3.9'
|
||||
- '3.10'
|
||||
- '3.11'
|
||||
- '3.12'
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
api.github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
*.amazonaws.com:443
|
||||
*.googleapis.com:443
|
||||
schema.ocsf.io:443
|
||||
registry-1.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
powershellinfraartifacts-gkhedzdeaghdezhr.z01.azurefd.net:443
|
||||
o26192.ingest.us.sentry.io:443
|
||||
management.azure.com:443
|
||||
login.microsoftonline.com:443
|
||||
keybase.io:443
|
||||
ingest.codecov.io:443
|
||||
graph.microsoft.com:443
|
||||
dc.services.visualstudio.com:443
|
||||
cloud.mongodb.com:443
|
||||
cli.codecov.io:443
|
||||
auth.docker.io:443
|
||||
api.vercel.com:443
|
||||
api.atlassian.com:443
|
||||
aka.ms:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -470,6 +499,30 @@ jobs:
|
||||
flags: prowler-py${{ matrix.python-version }}-googleworkspace
|
||||
files: ./googleworkspace_coverage.xml
|
||||
|
||||
# Vercel Provider
|
||||
- name: Check if Vercel files changed
|
||||
if: steps.check-changes.outputs.any_changed == 'true'
|
||||
id: changed-vercel
|
||||
uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
|
||||
with:
|
||||
files: |
|
||||
./prowler/**/vercel/**
|
||||
./tests/**/vercel/**
|
||||
./poetry.lock
|
||||
|
||||
- name: Run Vercel tests
|
||||
if: steps.changed-vercel.outputs.any_changed == 'true'
|
||||
run: poetry run pytest -n auto --cov=./prowler/providers/vercel --cov-report=xml:vercel_coverage.xml tests/providers/vercel
|
||||
|
||||
- name: Upload Vercel coverage to Codecov
|
||||
if: steps.changed-vercel.outputs.any_changed == 'true'
|
||||
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
with:
|
||||
flags: prowler-py${{ matrix.python-version }}-vercel
|
||||
files: ./vercel_coverage.xml
|
||||
|
||||
# Lib
|
||||
- name: Check if Lib files changed
|
||||
if: steps.check-changes.outputs.any_changed == 'true'
|
||||
|
||||
@@ -45,8 +45,19 @@ jobs:
|
||||
has-sdk-tests: ${{ steps.set-flags.outputs.has-sdk-tests }}
|
||||
has-api-tests: ${{ steps.set-flags.outputs.has-api-tests }}
|
||||
has-ui-e2e: ${{ steps.set-flags.outputs.has-ui-e2e }}
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
pypi.org:443
|
||||
files.pythonhosted.org:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -26,6 +26,11 @@ jobs:
|
||||
minor_version: ${{ steps.detect.outputs.minor_version }}
|
||||
patch_version: ${{ steps.detect.outputs.patch_version }}
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Detect release type and parse version
|
||||
id: detect
|
||||
run: |
|
||||
@@ -66,6 +71,11 @@ jobs:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -89,7 +99,7 @@ jobs:
|
||||
run: |
|
||||
set -e
|
||||
|
||||
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_MINOR_VERSION}|" .env
|
||||
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=.*|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_MINOR_VERSION}|" .env
|
||||
|
||||
echo "Files modified:"
|
||||
git --no-pager diff
|
||||
@@ -143,7 +153,7 @@ jobs:
|
||||
run: |
|
||||
set -e
|
||||
|
||||
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${FIRST_PATCH_VERSION}|" .env
|
||||
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=.*|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${FIRST_PATCH_VERSION}|" .env
|
||||
|
||||
echo "Files modified:"
|
||||
git --no-pager diff
|
||||
@@ -179,6 +189,11 @@ jobs:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -208,7 +223,7 @@ jobs:
|
||||
run: |
|
||||
set -e
|
||||
|
||||
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_PATCH_VERSION}|" .env
|
||||
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=.*|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_PATCH_VERSION}|" .env
|
||||
|
||||
echo "Files modified:"
|
||||
git --no-pager diff
|
||||
|
||||
@@ -44,6 +44,16 @@ jobs:
|
||||
- 'javascript-typescript'
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
uploads.github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -17,9 +17,6 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
@@ -45,7 +42,14 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
short-sha: ${{ steps.set-short-sha.outputs.short-sha }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Calculate short SHA
|
||||
id: set-short-sha
|
||||
run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
|
||||
@@ -57,7 +61,14 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
outputs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -96,6 +107,20 @@ jobs:
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
registry-1.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
auth.docker.io:443
|
||||
registry.npmjs.org:443
|
||||
dl-cdn.alpinelinux.org:443
|
||||
fonts.googleapis.com:443
|
||||
fonts.gstatic.com:443
|
||||
github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -131,17 +156,27 @@ jobs:
|
||||
needs: [setup, container-build-push]
|
||||
if: always() && needs.setup.result == 'success' && needs.container-build-push.result == 'success'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
||||
|
||||
- name: Create and push manifests for push event
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
@@ -183,7 +218,14 @@ jobs:
|
||||
needs: [setup, notify-release-started, container-build-push, create-manifest]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -226,6 +268,13 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
api.github.com:443
|
||||
|
||||
- name: Trigger UI deployment
|
||||
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
|
||||
with:
|
||||
|
||||
@@ -27,6 +27,13 @@ jobs:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -65,6 +72,24 @@ jobs:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
registry-1.docker.io:443
|
||||
auth.docker.io:443
|
||||
production.cloudflare.docker.com:443
|
||||
registry.npmjs.org:443
|
||||
dl-cdn.alpinelinux.org:443
|
||||
fonts.googleapis.com:443
|
||||
fonts.gstatic.com:443
|
||||
api.github.com:443
|
||||
mirror.gcr.io:443
|
||||
check.trivy.dev:443
|
||||
get.trivy.dev:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
|
||||
@@ -15,13 +15,12 @@ on:
|
||||
- 'ui/**'
|
||||
- 'api/**' # API changes can affect UI E2E
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
# First, analyze which tests need to run
|
||||
impact-analysis:
|
||||
if: github.repository == 'prowler-cloud/prowler'
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/test-impact-analysis.yml
|
||||
|
||||
# Run E2E tests based on impact analysis
|
||||
@@ -75,8 +74,15 @@ jobs:
|
||||
# Pass E2E paths from impact analysis
|
||||
E2E_TEST_PATHS: ${{ needs.impact-analysis.outputs.ui-e2e }}
|
||||
RUN_ALL_TESTS: ${{ needs.impact-analysis.outputs.run-all }}
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -157,7 +163,7 @@ jobs:
|
||||
node-version: '24.13.0'
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061 # v4
|
||||
uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061 # v4.2.0
|
||||
with:
|
||||
version: 10
|
||||
run_install: false
|
||||
@@ -273,7 +279,14 @@ jobs:
|
||||
needs.impact-analysis.outputs.has-ui-e2e != 'true' &&
|
||||
needs.impact-analysis.outputs.run-all != 'true'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Harden the runner (Audit all outbound calls)
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: No E2E tests needed
|
||||
run: |
|
||||
echo "## E2E Tests Skipped" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
@@ -29,6 +29,18 @@ jobs:
|
||||
working-directory: ./ui
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
|
||||
with:
|
||||
egress-policy: block
|
||||
allowed-endpoints: >
|
||||
github.com:443
|
||||
registry.npmjs.org:443
|
||||
fonts.googleapis.com:443
|
||||
fonts.gstatic.com:443
|
||||
api.github.com:443
|
||||
release-assets.githubusercontent.com:443
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
@@ -84,7 +96,7 @@ jobs:
|
||||
|
||||
- name: Setup pnpm
|
||||
if: steps.check-changes.outputs.any_changed == 'true'
|
||||
uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061 # v4
|
||||
uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061 # v4.2.0
|
||||
with:
|
||||
version: 10
|
||||
run_install: false
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
rules:
|
||||
secrets-outside-env:
|
||||
ignore:
|
||||
- api-bump-version.yml
|
||||
- api-container-build-push.yml
|
||||
- api-tests.yml
|
||||
- backport.yml
|
||||
- docs-bump-version.yml
|
||||
- issue-triage.lock.yml
|
||||
- mcp-container-build-push.yml
|
||||
- pr-merged.yml
|
||||
- prepare-release.yml
|
||||
- sdk-bump-version.yml
|
||||
- sdk-container-build-push.yml
|
||||
- sdk-refresh-aws-services-regions.yml
|
||||
- sdk-refresh-oci-regions.yml
|
||||
- sdk-tests.yml
|
||||
- ui-bump-version.yml
|
||||
- ui-container-build-push.yml
|
||||
- ui-e2e-tests-v2.yml
|
||||
superfluous-actions:
|
||||
ignore:
|
||||
- pr-check-changelog.yml
|
||||
- pr-conflict-checker.yml
|
||||
- prepare-release.yml
|
||||
@@ -140,7 +140,7 @@ Prowler is an open-source cloud security assessment tool supporting AWS, Azure,

| Component | Location | Tech Stack |
|-----------|----------|------------|
| SDK | `prowler/` | Python 3.9+, Poetry |
| SDK | `prowler/` | Python 3.10+, Poetry |
| API | `api/` | Django 5.1, DRF, Celery |
| UI | `ui/` | Next.js 15, React 19, Tailwind 4 |
| MCP Server | `mcp_server/` | FastMCP, Python 3.12+ |
+1
-1
@@ -1,4 +1,4 @@
FROM python:3.12.11-slim-bookworm AS build
FROM python:3.12.11-slim-bookworm@sha256:519591d6871b7bc437060736b9f7456b8731f1499a57e22e6c285135ae657bf7 AS build

LABEL maintainer="https://github.com/prowler-cloud/prowler"
LABEL org.opencontainers.image.source="https://github.com/prowler-cloud/prowler"
@@ -119,6 +119,7 @@ Every AWS provider scan will enqueue an Attack Paths ingestion job automatically
| Image | N/A | N/A | N/A | N/A | Official | CLI, API |
| Google Workspace | 1 | 1 | 0 | 1 | Official | CLI |
| OpenStack | 27 | 4 | 0 | 8 | Official | UI, API, CLI |
| Vercel | 30 | 6 | 0 | 5 | Official | CLI |
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |

> [!Note]
@@ -239,9 +240,24 @@ pnpm start

> Once configured, access the Prowler App at http://localhost:3000. Sign up using your email and password to get started.

**Pre-commit Hooks Setup**

Some pre-commit hooks require tools installed on your system:

1. **Install [TruffleHog](https://github.com/trufflesecurity/trufflehog#install)** (secret scanning) — see the [official installation options](https://github.com/trufflesecurity/trufflehog#install).

2. **Install [Safety](https://github.com/pyupio/safety)** (dependency vulnerability checking):

```console
# Requires a Python environment (e.g. via pyenv)
pip install safety
```

3. **Install [Hadolint](https://github.com/hadolint/hadolint#install)** (Dockerfile linting) — see the [official installation options](https://github.com/hadolint/hadolint#install).

## Prowler CLI

### Pip package

Prowler CLI is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/). Consequently, it can be installed using pip with Python >3.9.1, <3.13:
Prowler CLI is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/). Consequently, it can be installed using pip with Python >=3.10, <3.13:

```console
pip install prowler
@@ -273,7 +289,7 @@ The container images are available here:

### From GitHub

Python >3.9.1, <3.13 is required with pip and Poetry:
Python >=3.10, <3.13 is required with pip and Poetry:

``` console
git clone https://github.com/prowler-cloud/prowler
@@ -2,6 +2,35 @@

All notable changes to the **Prowler API** are documented in this file.

## [1.24.0] (Prowler UNRELEASED)

### 🚀 Added

- `VALKEY_SCHEME`, `VALKEY_USERNAME`, and `VALKEY_PASSWORD` environment variables to configure Celery broker TLS/auth connection details for Valkey/ElastiCache [(#10420)](https://github.com/prowler-cloud/prowler/pull/10420)
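The entry above only names the new variables. For readers wiring this up, the sketch below shows one plausible way such variables compose into a Celery broker URL; it assumes the conventional `redis://` / `rediss://` URL shape and the existing host/port/db defaults, while the actual settings code in the API is not part of this diff and may differ.

```python
import os


def valkey_broker_url() -> str:
    """Compose a Celery broker URL from VALKEY_* environment variables.

    Illustrative only: assumes the standard redis URL layout; the real
    Prowler API settings module may assemble this differently.
    """
    scheme = os.getenv("VALKEY_SCHEME", "redis")  # "rediss" enables TLS
    username = os.getenv("VALKEY_USERNAME", "")
    password = os.getenv("VALKEY_PASSWORD", "")
    host = os.getenv("VALKEY_HOST", "valkey")
    port = os.getenv("VALKEY_PORT", "6379")
    db = os.getenv("VALKEY_DB", "0")

    auth = f"{username}:{password}@" if (username or password) else ""
    return f"{scheme}://{auth}{host}:{port}/{db}"


# Example: VALKEY_SCHEME=rediss plus credentials would yield
# rediss://user:secret@my-cache.example.com:6379/0
```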

### 🔄 Changed

- Attack Paths: Periodic cleanup of stale scans with dead-worker detection via Celery inspect, marking orphaned `EXECUTING` scans as `FAILED` and recovering `graph_data_ready` [(#10387)](https://github.com/prowler-cloud/prowler/pull/10387)
- Attack Paths: Replace `_provider_id` property with `_Provider_{uuid}` label for provider isolation, add regex-based label injection for custom queries [(#10402)](https://github.com/prowler-cloud/prowler/pull/10402)
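The stale-scan cleanup above relies on asking Celery which workers are still alive. A minimal sketch of that detection step, using the standard `inspect().ping()` control API, follows; the model name and recovery steps in the trailing comment are placeholders, not the code merged in #10387.

```python
from celery import current_app


def find_dead_worker_hostnames(known_hostnames: set[str]) -> set[str]:
    """Return worker nodenames that no longer answer a Celery ping.

    ping() returns a dict keyed by worker nodename (e.g. "taskid@host"),
    or None when no worker replies at all.
    """
    replies = current_app.control.inspect(timeout=5).ping() or {}
    return known_hostnames - set(replies)


# A periodic task could then mark EXECUTING scans owned by a dead worker
# as FAILED and reset graph_data_ready, roughly (hypothetical model name):
#
#   for scan in AttackPathsScan.objects.filter(status="EXECUTING"):
#       if scan.worker_hostname in dead:
#           scan.status = "FAILED"
#           scan.save(update_fields=["status"])
```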

### 🐞 Fixed

- Finding groups list/latest now apply computed status/severity filters and finding-level prefilters (delta, region, service, category, resource group, scan, resource type), plus `check_title` support for sort/filter consistency [(#10428)](https://github.com/prowler-cloud/prowler/pull/10428)
- Populate compliance data inside `check_metadata` for findings, which was always returned as `null` [(#10449)](https://github.com/prowler-cloud/prowler/pull/10449)
- 403 error for admin users listing tenants due to roles query not using the admin database connection [(#10460)](https://github.com/prowler-cloud/prowler/pull/10460)
- Filter transient Neo4j defunct connection logs in Sentry `before_send` to suppress false-positive alerts handled by `RetryableSession` retries [(#10452)](https://github.com/prowler-cloud/prowler/pull/10452)
- `MANAGE_ACCOUNT` permission no longer required for listing and creating tenants [(#10468)](https://github.com/prowler-cloud/prowler/pull/10468)
- Finding groups muted filter, counters, metadata extraction and mute reaggregation [(#10477)](https://github.com/prowler-cloud/prowler/pull/10477)
- Finding groups `check_title__icontains` resolution, `name__icontains` resource filter and `resource_group` field in `/resources` response [(#10486)](https://github.com/prowler-cloud/prowler/pull/10486)
- Membership `post_delete` signal using raw FK ids to avoid `DoesNotExist` during cascade deletions [(#10497)](https://github.com/prowler-cloud/prowler/pull/10497)
- Finding group resources endpoints returning false 404 when filters match no results, and `sort` parameter being ignored [(#10510)](https://github.com/prowler-cloud/prowler/pull/10510)
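The Neo4j `before_send` fix in the list above uses Sentry's standard event-filtering hook. Below is a generic sketch of that pattern; the matching condition, event field, and DSN are illustrative assumptions, not the exact filter from #10452.

```python
import sentry_sdk


def before_send(event, hint):
    """Drop transient Neo4j 'defunct connection' noise before it reaches Sentry.

    Assumption for illustration: the retryable error surfaces in the log
    message; the real filter may inspect exception types or logger names.
    """
    message = (event.get("logentry") or {}).get("message", "")
    if "defunct connection" in message.lower():
        return None  # returning None drops the event
    return event


sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    before_send=before_send,
)
```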

### 🔐 Security

- Pin all unpinned dependencies to exact versions to prevent supply chain attacks and ensure reproducible builds [(#10469)](https://github.com/prowler-cloud/prowler/pull/10469)

---

## [1.23.0] (Prowler v5.22.0)

### 🚀 Added
+1
-1
@@ -1,4 +1,4 @@
FROM python:3.12.10-slim-bookworm AS build
FROM python:3.12.10-slim-bookworm@sha256:fd95fa221297a88e1cf49c55ec1828edd7c5a428187e67b5d1805692d11588db AS build

LABEL maintainer="https://github.com/prowler-cloud/api"
@@ -30,9 +30,28 @@ start_prod_server() {
    poetry run gunicorn -c config/guniconf.py config.wsgi:application
}

resolve_worker_hostname() {
    TASK_ID=""

    if [ -n "$ECS_CONTAINER_METADATA_URI_V4" ]; then
        TASK_ID=$(wget -qO- --timeout=2 "${ECS_CONTAINER_METADATA_URI_V4}/task" | \
            python3 -c "import sys,json; print(json.load(sys.stdin)['TaskARN'].split('/')[-1])" 2>/dev/null)
    fi

    if [ -z "$TASK_ID" ]; then
        TASK_ID=$(python3 -c "import uuid; print(uuid.uuid4().hex)")
    fi

    echo "${TASK_ID}@$(hostname)"
}

start_worker() {
    echo "Starting the worker..."
    poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans,scan-reports,deletion,backfill,overview,integrations,compliance,attack-paths-scans -E --max-tasks-per-child 1
    poetry run python -m celery -A config.celery worker \
        -n "$(resolve_worker_hostname)" \
        -l "${DJANGO_LOGGING_LEVEL:-info}" \
        -Q celery,scans,scan-reports,deletion,backfill,overview,integrations,compliance,attack-paths-scans \
        -E --max-tasks-per-child 1
}

start_worker_beat() {
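The new `resolve_worker_hostname` above packs the ECS task lookup into a wget-plus-inline-Python pipeline so each Celery worker gets a node name that is unique per ECS task. For clarity, a plain-Python sketch of the same idea follows; it assumes only that the ECS task metadata v4 endpoint returns a JSON document with a `TaskARN` field, and it falls back to a random ID exactly like the shell version.

```python
import json
import os
import urllib.request
import uuid


def resolve_task_id(timeout: float = 2.0) -> str:
    """Return the ECS task ID, or a random hex ID when not running on ECS.

    Mirrors the entrypoint's shell logic: query the task metadata v4
    endpoint and take the last path segment of TaskARN, e.g.
    arn:aws:ecs:...:task/cluster/1a2b3c -> "1a2b3c".
    """
    endpoint = os.getenv("ECS_CONTAINER_METADATA_URI_V4")
    if endpoint:
        try:
            with urllib.request.urlopen(f"{endpoint}/task", timeout=timeout) as resp:
                metadata = json.load(resp)
            return metadata["TaskARN"].split("/")[-1]
        except Exception:
            pass  # fall through to the random fallback, like the shell script
    return uuid.uuid4().hex
```
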
Generated
+102
-107
@@ -1,4 +1,4 @@
|
||||
# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 2.3.2 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "about-time"
|
||||
@@ -2469,22 +2469,18 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
|
||||
|
||||
[[package]]
|
||||
name = "cron-descriptor"
|
||||
version = "2.0.6"
|
||||
version = "1.4.5"
|
||||
description = "A Python library that converts cron expressions into human readable strings."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "cron_descriptor-2.0.6-py3-none-any.whl", hash = "sha256:3a1c0d837c0e5a32e415f821b36cf758eb92d510e6beff8fbfe4fa16573d93d6"},
|
||||
{file = "cron_descriptor-2.0.6.tar.gz", hash = "sha256:e39d2848e1d8913cfb6e3452e701b5eec662ee18bea8cc5aa53ee1a7bb217157"},
|
||||
{file = "cron_descriptor-1.4.5-py3-none-any.whl", hash = "sha256:736b3ae9d1a99bc3dbfc5b55b5e6e7c12031e7ba5de716625772f8b02dcd6013"},
|
||||
{file = "cron_descriptor-1.4.5.tar.gz", hash = "sha256:f51ce4ffc1d1f2816939add8524f206c376a42c87a5fca3091ce26725b3b1bca"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
typing_extensions = "*"
|
||||
|
||||
[package.extras]
|
||||
dev = ["mypy", "polib", "ruff"]
|
||||
test = ["pytest"]
|
||||
dev = ["polib"]
|
||||
|
||||
[[package]]
|
||||
name = "crowdstrike-falconpy"
|
||||
@@ -2801,14 +2797,14 @@ bcrypt = ["bcrypt"]
|
||||
|
||||
[[package]]
|
||||
name = "django-allauth"
|
||||
version = "65.14.0"
|
||||
version = "65.15.0"
|
||||
description = "Integrated set of Django applications addressing authentication, registration, account management as well as 3rd party (social) account authentication."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "django_allauth-65.14.0-py3-none-any.whl", hash = "sha256:448f5f7877f95fcbe1657256510fe7822d7871f202521a29e23ef937f3325a97"},
|
||||
{file = "django_allauth-65.14.0.tar.gz", hash = "sha256:5529227aba2b1377d900e9274a3f24496c645e65400fbae3cad5789944bc4d0b"},
|
||||
{file = "django_allauth-65.15.0-py3-none-any.whl", hash = "sha256:ad9fc49c49a9368eaa5bb95456b76e2a4f377b3c6862ee8443507816578c098d"},
|
||||
{file = "django_allauth-65.15.0.tar.gz", hash = "sha256:b404d48cf0c3ee14dacc834c541f30adedba2ff1c433980ecc494d6cb0b395a8"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2831,20 +2827,20 @@ steam = ["python3-openid (>=3.0.8,<4)"]
|
||||
|
||||
[[package]]
|
||||
name = "django-celery-beat"
|
||||
version = "2.8.1"
|
||||
version = "2.9.0"
|
||||
description = "Database-backed Periodic Tasks."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "django_celery_beat-2.8.1-py3-none-any.whl", hash = "sha256:da2b1c6939495c05a551717509d6e3b79444e114a027f7b77bf3727c2a39d171"},
|
||||
{file = "django_celery_beat-2.8.1.tar.gz", hash = "sha256:dfad0201c0ac50c91a34700ef8fa0a10ee098cc7f3375fe5debed79f2204f80a"},
|
||||
{file = "django_celery_beat-2.9.0-py3-none-any.whl", hash = "sha256:4a9e5ebe26d6f8d7215e1fc5c46e466016279dc102435a28141108649bdf2157"},
|
||||
{file = "django_celery_beat-2.9.0.tar.gz", hash = "sha256:92404650f52fcb44cf08e2b09635cb1558327c54b1a5d570f0e2d3a22130934c"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
celery = ">=5.2.3,<6.0"
|
||||
cron-descriptor = ">=1.2.32"
|
||||
Django = ">=2.2,<6.0"
|
||||
cron-descriptor = ">=1.2.32,<2.0.0"
|
||||
Django = ">=2.2,<6.1"
|
||||
django-timezone-field = ">=5.0"
|
||||
python-crontab = ">=2.3.4"
|
||||
tzdata = "*"
|
||||
@@ -2965,7 +2961,7 @@ files = [
|
||||
[package.dependencies]
|
||||
autopep8 = "*"
|
||||
Django = ">=4.2"
|
||||
gprof2dot = ">=2017.09.19"
|
||||
gprof2dot = ">=2017.9.19"
|
||||
sqlparse = "*"
|
||||
|
||||
[[package]]
|
||||
@@ -3376,62 +3372,62 @@ dotenv = ["python-dotenv"]
|
||||
|
||||
[[package]]
|
||||
name = "fonttools"
|
||||
version = "4.61.1"
|
||||
version = "4.62.1"
|
||||
description = "Tools to manipulate font files"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c7db70d57e5e1089a274cbb2b1fd635c9a24de809a231b154965d415d6c6d24"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5fe9fd43882620017add5eabb781ebfbc6998ee49b35bd7f8f79af1f9f99a958"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8db08051fc9e7d8bc622f2112511b8107d8f27cd89e2f64ec45e9825e8288da"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a76d4cb80f41ba94a6691264be76435e5f72f2cb3cab0b092a6212855f71c2f6"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a13fc8aeb24bad755eea8f7f9d409438eb94e82cf86b08fe77a03fbc8f6a96b1"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b846a1fcf8beadeb9ea4f44ec5bdde393e2f1569e17d700bfc49cd69bde75881"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-win32.whl", hash = "sha256:78a7d3ab09dc47ac1a363a493e6112d8cabed7ba7caad5f54dbe2f08676d1b47"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-win_amd64.whl", hash = "sha256:eff1ac3cc66c2ac7cda1e64b4e2f3ffef474b7335f92fc3833fc632d595fcee6"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c6604b735bb12fef8e0efd5578c9fb5d3d8532d5001ea13a19cddf295673ee09"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5ce02f38a754f207f2f06557523cd39a06438ba3aafc0639c477ac409fc64e37"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77efb033d8d7ff233385f30c62c7c79271c8885d5c9657d967ede124671bbdfb"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:75c1a6dfac6abd407634420c93864a1e274ebc1c7531346d9254c0d8f6ca00f9"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0de30bfe7745c0d1ffa2b0b7048fb7123ad0d71107e10ee090fa0b16b9452e87"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58b0ee0ab5b1fc9921eccfe11d1435added19d6494dde14e323f25ad2bc30c56"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-win32.whl", hash = "sha256:f79b168428351d11e10c5aeb61a74e1851ec221081299f4cf56036a95431c43a"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-win_amd64.whl", hash = "sha256:fe2efccb324948a11dd09d22136fe2ac8a97d6c1347cf0b58a911dcd529f66b7"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f3cb4a569029b9f291f88aafc927dd53683757e640081ca8c412781ea144565e"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41a7170d042e8c0024703ed13b71893519a1a6d6e18e933e3ec7507a2c26a4b2"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15acc09befd16a0fb8a8f62bc147e1a82817542d72184acca9ce6e0aeda9fa6d"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6bcdf33aec38d16508ce61fd81838f24c83c90a1d1b8c68982857038673d6b8"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5fade934607a523614726119164ff621e8c30e8fa1ffffbbd358662056ba69f0"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-win32.whl", hash = "sha256:75da8f28eff26defba42c52986de97b22106cb8f26515b7c22443ebc9c2d3261"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-win_amd64.whl", hash = "sha256:497c31ce314219888c0e2fce5ad9178ca83fe5230b01a5006726cdf3ac9f24d9"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c56c488ab471628ff3bfa80964372fc13504ece601e0d97a78ee74126b2045c"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dc492779501fa723b04d0ab1f5be046797fee17d27700476edc7ee9ae535a61e"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:64102ca87e84261419c3747a0d20f396eb024bdbeb04c2bfb37e2891f5fadcb5"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c1b526c8d3f615a7b1867f38a9410849c8f4aef078535742198e942fba0e9bd"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:41ed4b5ec103bd306bb68f81dc166e77409e5209443e5773cb4ed837bcc9b0d3"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b501c862d4901792adaec7c25b1ecc749e2662543f68bb194c42ba18d6eec98d"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-win32.whl", hash = "sha256:4d7092bb38c53bbc78e9255a59158b150bcdc115a1e3b3ce0b5f267dc35dd63c"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-win_amd64.whl", hash = "sha256:21e7c8d76f62ab13c9472ccf74515ca5b9a761d1bde3265152a6dc58700d895b"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fff4f534200a04b4a36e7ae3cb74493afe807b517a09e99cb4faa89a34ed6ecd"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:d9203500f7c63545b4ce3799319fe4d9feb1a1b89b28d3cb5abd11b9dd64147e"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa646ecec9528bef693415c79a86e733c70a4965dd938e9a226b0fc64c9d2e6c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:11f35ad7805edba3aac1a3710d104592df59f4b957e30108ae0ba6c10b11dd75"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b931ae8f62db78861b0ff1ac017851764602288575d65b8e8ff1963fed419063"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b148b56f5de675ee16d45e769e69f87623a4944f7443850bf9a9376e628a89d2"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-win32.whl", hash = "sha256:9b666a475a65f4e839d3d10473fad6d47e0a9db14a2f4a224029c5bfde58ad2c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-win_amd64.whl", hash = "sha256:4f5686e1fe5fce75d82d93c47a438a25bf0d1319d2843a926f741140b2b16e0c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:e76ce097e3c57c4bcb67c5aa24a0ecdbd9f74ea9219997a707a4061fbe2707aa"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9cfef3ab326780c04d6646f68d4b4742aae222e8b8ea1d627c74e38afcbc9d91"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a75c301f96db737e1c5ed5fd7d77d9c34466de16095a266509e13da09751bd19"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:91669ccac46bbc1d09e9273546181919064e8df73488ea087dcac3e2968df9ba"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c33ab3ca9d3ccd581d58e989d67554e42d8d4ded94ab3ade3508455fe70e65f7"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:664c5a68ec406f6b1547946683008576ef8b38275608e1cee6c061828171c118"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-win32.whl", hash = "sha256:aed04cabe26f30c1647ef0e8fbb207516fd40fe9472e9439695f5c6998e60ac5"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2180f14c141d2f0f3da43f3a81bc8aa4684860f6b0e6f9e165a4831f24e6a23b"},
|
||||
{file = "fonttools-4.61.1-py3-none-any.whl", hash = "sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371"},
|
||||
{file = "fonttools-4.61.1.tar.gz", hash = "sha256:6675329885c44657f826ef01d9e4fb33b9158e9d93c537d84ad8399539bc6f69"},
|
||||
{file = "fonttools-4.62.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ad5cca75776cd453b1b035b530e943334957ae152a36a88a320e779d61fc980c"},
|
||||
{file = "fonttools-4.62.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0b3ae47e8636156a9accff64c02c0924cbebad62854c4a6dbdc110cd5b4b341a"},
|
||||
{file = "fonttools-4.62.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9b9e288b4da2f64fd6180644221749de651703e8d0c16bd4b719533a3a7d6e3"},
|
||||
{file = "fonttools-4.62.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7bca7a1c1faf235ffe25d4f2e555246b4750220b38de8261d94ebc5ce8a23c23"},
|
||||
{file = "fonttools-4.62.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b4e0fcf265ad26e487c56cb12a42dffe7162de708762db951e1b3f755319507d"},
|
||||
{file = "fonttools-4.62.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2d850f66830a27b0d498ee05adb13a3781637b1826982cd7e2b3789ef0cc71ae"},
|
||||
{file = "fonttools-4.62.1-cp310-cp310-win32.whl", hash = "sha256:486f32c8047ccd05652aba17e4a8819a3a9d78570eb8a0e3b4503142947880ed"},
|
||||
{file = "fonttools-4.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:5a648bde915fba9da05ae98856987ca91ba832949a9e2888b48c47ef8b96c5a9"},
|
||||
{file = "fonttools-4.62.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:40975849bac44fb0b9253d77420c6d8b523ac4dcdcefeff6e4d706838a5b80f7"},
|
||||
{file = "fonttools-4.62.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9dde91633f77fa576879a0c76b1d89de373cae751a98ddf0109d54e173b40f14"},
|
||||
{file = "fonttools-4.62.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6acb4109f8bee00fec985c8c7afb02299e35e9c94b57287f3ea542f28bd0b0a7"},
|
||||
{file = "fonttools-4.62.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1c5c25671ce8805e0d080e2ffdeca7f1e86778c5cbfbeae86d7f866d8830517b"},
|
||||
{file = "fonttools-4.62.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a5d8825e1140f04e6c99bb7d37a9e31c172f3bc208afbe02175339e699c710e1"},
|
||||
{file = "fonttools-4.62.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:268abb1cb221e66c014acc234e872b7870d8b5d4657a83a8f4205094c32d2416"},
|
||||
{file = "fonttools-4.62.1-cp311-cp311-win32.whl", hash = "sha256:942b03094d7edbb99bdf1ae7e9090898cad7bf9030b3d21f33d7072dbcb51a53"},
|
||||
{file = "fonttools-4.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:e8514f4924375f77084e81467e63238b095abda5107620f49421c368a6017ed2"},
|
||||
{file = "fonttools-4.62.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:90365821debbd7db678809c7491ca4acd1e0779b9624cdc6ddaf1f31992bf974"},
|
||||
{file = "fonttools-4.62.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12859ff0b47dd20f110804c3e0d0970f7b832f561630cd879969011541a464a9"},
|
||||
{file = "fonttools-4.62.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c125ffa00c3d9003cdaaf7f2c79e6e535628093e14b5de1dccb08859b680936"},
|
||||
{file = "fonttools-4.62.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:149f7d84afca659d1a97e39a4778794a2f83bf344c5ee5134e09995086cc2392"},
|
||||
{file = "fonttools-4.62.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0aa72c43a601cfa9273bb1ae0518f1acadc01ee181a6fc60cd758d7fdadffc04"},
|
||||
{file = "fonttools-4.62.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:19177c8d96c7c36359266e571c5173bcee9157b59cfc8cb0153c5673dc5a3a7d"},
|
||||
{file = "fonttools-4.62.1-cp312-cp312-win32.whl", hash = "sha256:a24decd24d60744ee8b4679d38e88b8303d86772053afc29b19d23bb8207803c"},
|
||||
{file = "fonttools-4.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:9e7863e10b3de72376280b515d35b14f5eeed639d1aa7824f4cf06779ec65e42"},
|
||||
{file = "fonttools-4.62.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c22b1014017111c401469e3acc5433e6acf6ebcc6aa9efb538a533c800971c79"},
|
||||
{file = "fonttools-4.62.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:68959f5fc58ed4599b44aad161c2837477d7f35f5f79402d97439974faebfebe"},
|
||||
{file = "fonttools-4.62.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef46db46c9447103b8f3ff91e8ba009d5fe181b1920a83757a5762551e32bb68"},
|
||||
{file = "fonttools-4.62.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6706d1cb1d5e6251a97ad3c1b9347505c5615c112e66047abbef0f8545fa30d1"},
|
||||
{file = "fonttools-4.62.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2e7abd2b1e11736f58c1de27819e1955a53267c21732e78243fa2fa2e5c1e069"},
|
||||
{file = "fonttools-4.62.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:403d28ce06ebfc547fbcb0cb8b7f7cc2f7a2d3e1a67ba9a34b14632df9e080f9"},
|
||||
{file = "fonttools-4.62.1-cp313-cp313-win32.whl", hash = "sha256:93c316e0f5301b2adbe6a5f658634307c096fd5aae60a5b3412e4f3e1728ab24"},
|
||||
{file = "fonttools-4.62.1-cp313-cp313-win_amd64.whl", hash = "sha256:7aa21ff53e28a9c2157acbc44e5b401149d3c9178107130e82d74ceb500e5056"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fa1d16210b6b10a826d71bed68dd9ec24a9e218d5a5e2797f37c573e7ec215ca"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:aa69d10ed420d8121118e628ad47d86e4caa79ba37f968597b958f6cceab7eca"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd13b7999d59c5eb1c2b442eb2d0c427cb517a0b7a1f5798fc5c9e003f5ff782"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8d337fdd49a79b0d51c4da87bc38169d21c3abbf0c1aa9367eff5c6656fb6dae"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d241cdc4a67b5431c6d7f115fdf63335222414995e3a1df1a41e1182acd4bcc7"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c05557a78f8fa514da0f869556eeda40887a8abc77c76ee3f74cf241778afd5a"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314-win32.whl", hash = "sha256:49a445d2f544ce4a69338694cad575ba97b9a75fff02720da0882d1a73f12800"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314-win_amd64.whl", hash = "sha256:1eecc128c86c552fb963fe846ca4e011b1be053728f798185a1687502f6d398e"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:1596aeaddf7f78e21e68293c011316a25267b3effdaccaf4d59bc9159d681b82"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:8f8fca95d3bb3208f59626a4b0ea6e526ee51f5a8ad5d91821c165903e8d9260"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee91628c08e76f77b533d65feb3fbe6d9dad699f95be51cf0d022db94089cdc4"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f37df1cac61d906e7b836abe356bc2f34c99d4477467755c216b72aa3dc748b"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:92bb00a947e666169c99b43753c4305fc95a890a60ef3aeb2a6963e07902cc87"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:bdfe592802ef939a0e33106ea4a318eeb17822c7ee168c290273cbd5fabd746c"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314t-win32.whl", hash = "sha256:b820fcb92d4655513d8402d5b219f94481c4443d825b4372c75a2072aa4b357a"},
|
||||
{file = "fonttools-4.62.1-cp314-cp314t-win_amd64.whl", hash = "sha256:59b372b4f0e113d3746b88985f1c796e7bf830dd54b28374cd85c2b8acd7583e"},
|
||||
{file = "fonttools-4.62.1-py3-none-any.whl", hash = "sha256:7487782e2113861f4ddcc07c3436450659e3caa5e470b27dc2177cade2d8e7fd"},
|
||||
{file = "fonttools-4.62.1.tar.gz", hash = "sha256:e54c75fd6041f1122476776880f7c3c3295ffa31962dc6ebe2543c00dca58b5d"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
@@ -4573,7 +4569,7 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
attrs = ">=22.2.0"
|
||||
jsonschema-specifications = ">=2023.03.6"
|
||||
jsonschema-specifications = ">=2023.3.6"
|
||||
referencing = ">=0.28.4"
|
||||
rpds-py = ">=0.7.1"
|
||||
|
||||
@@ -4781,7 +4777,7 @@ librabbitmq = ["librabbitmq (>=2.0.0) ; python_version < \"3.11\""]
|
||||
mongodb = ["pymongo (==4.15.3)"]
|
||||
msgpack = ["msgpack (==1.1.2)"]
|
||||
pyro = ["pyro4 (==4.82)"]
|
||||
qpid = ["qpid-python (==1.36.0-1)", "qpid-tools (==1.36.0-1)"]
|
||||
qpid = ["qpid-python (==1.36.0.post1)", "qpid-tools (==1.36.0.post1)"]
|
||||
redis = ["redis (>=4.5.2,!=4.5.5,!=5.0.2,<6.5)"]
|
||||
slmq = ["softlayer_messaging (>=1.0.3)"]
|
||||
sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"]
|
||||
@@ -4802,7 +4798,7 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
certifi = ">=14.05.14"
|
||||
certifi = ">=14.5.14"
|
||||
durationpy = ">=0.7"
|
||||
google-auth = ">=1.0.1"
|
||||
oauthlib = ">=3.2.2"
|
||||
@@ -5046,18 +5042,18 @@ tests = ["psutil", "pytest (!=3.3.0)", "pytest-cov"]
|
||||
|
||||
[[package]]
|
||||
name = "markdown"
|
||||
version = "3.9"
|
||||
version = "3.10.2"
|
||||
description = "Python implementation of John Gruber's Markdown."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "markdown-3.9-py3-none-any.whl", hash = "sha256:9f4d91ed810864ea88a6f32c07ba8bee1346c0cc1f6b1f9f6c822f2a9667d280"},
|
||||
{file = "markdown-3.9.tar.gz", hash = "sha256:d2900fe1782bd33bdbbd56859defef70c2e78fc46668f8eb9df3128138f2cb6a"},
|
||||
{file = "markdown-3.10.2-py3-none-any.whl", hash = "sha256:e91464b71ae3ee7afd3017d9f358ef0baf158fd9a298db92f1d4761133824c36"},
|
||||
{file = "markdown-3.10.2.tar.gz", hash = "sha256:994d51325d25ad8aa7ce4ebaec003febcce822c3f8c911e3b17c52f7f589f950"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"]
|
||||
docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python] (>=0.28.3)"]
|
||||
testing = ["coverage", "pyyaml"]
|
||||
|
||||
[[package]]
|
||||
@@ -5831,14 +5827,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "nltk"
|
||||
version = "3.9.2"
|
||||
version = "3.9.4"
|
||||
description = "Natural Language Toolkit"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
python-versions = ">=3.10"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "nltk-3.9.2-py3-none-any.whl", hash = "sha256:1e209d2b3009110635ed9709a67a1a3e33a10f799490fa71cf4bec218c11c88a"},
|
||||
{file = "nltk-3.9.2.tar.gz", hash = "sha256:0f409e9b069ca4177c1903c3e843eef90c7e92992fa4931ae607da6de49e1419"},
|
||||
{file = "nltk-3.9.4-py3-none-any.whl", hash = "sha256:f2fa301c3a12718ce4a0e9305c5675299da5ad9e26068218b69d692fda84828f"},
|
||||
{file = "nltk-3.9.4.tar.gz", hash = "sha256:ed03bc098a40481310320808b2db712d95d13ca65b27372f8a403949c8b523d0"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -6636,10 +6632,10 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "prowler"
|
||||
version = "5.22.0"
|
||||
version = "5.23.0"
|
||||
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
|
||||
optional = false
|
||||
python-versions = ">3.9.1,<3.13"
|
||||
python-versions = ">=3.10,<3.13"
|
||||
groups = ["main"]
|
||||
files = []
|
||||
develop = false
|
||||
@@ -6702,7 +6698,7 @@ google-auth-httplib2 = ">=0.1,<0.3"
|
||||
h2 = "4.3.0"
|
||||
jsonschema = "4.23.0"
|
||||
kubernetes = "32.0.1"
|
||||
markdown = "3.9.0"
|
||||
markdown = "3.10.2"
|
||||
microsoft-kiota-abstractions = "1.9.2"
|
||||
msgraph-sdk = "1.23.0"
|
||||
numpy = "2.0.2"
|
||||
@@ -6726,7 +6722,7 @@ uuid6 = "2024.7.10"
|
||||
type = "git"
|
||||
url = "https://github.com/prowler-cloud/prowler.git"
|
||||
reference = "master"
|
||||
resolved_reference = "41629137efdec1ade078e4386f738c8e0ffce94b"
|
||||
resolved_reference = "2ddd5b3091bcdd8c7d44aba73b13c5c6f8f99e35"
|
||||
|
||||
[[package]]
|
||||
name = "psutil"
|
||||
@@ -7165,7 +7161,7 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
astroid = ">=3.2.2,<=3.3.0-dev0"
|
||||
astroid = ">=3.2.2,<=3.3.0.dev0"
|
||||
colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
|
||||
dill = [
|
||||
{version = ">=0.3.7", markers = "python_version >= \"3.12\""},
|
||||
@@ -7349,14 +7345,14 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments
|
||||
|
||||
[[package]]
|
||||
name = "pytest-celery"
|
||||
version = "1.2.1"
|
||||
version = "1.3.0"
|
||||
description = "Pytest plugin for Celery"
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.8"
|
||||
python-versions = "<4.0,>=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pytest_celery-1.2.1-py3-none-any.whl", hash = "sha256:0441ab0c2a712b775be16ffda3d7deb31995fd7b5e9d71630e7ea98b474346a3"},
|
||||
{file = "pytest_celery-1.2.1.tar.gz", hash = "sha256:7873fb3cf4fbfe9b0dd15d359bdb8bbab4a41c7e48f5b0adb7d36138d3704d52"},
|
||||
{file = "pytest_celery-1.3.0-py3-none-any.whl", hash = "sha256:f02201d7770584a0c412a1ded329a142170c24012467c7046f2c72cc8205ad5d"},
|
||||
{file = "pytest_celery-1.3.0.tar.gz", hash = "sha256:bd9e5b0f594ec5de9ab97cf27e3a11c644718a761bab6b997d01800fd7394f64"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -7367,14 +7363,13 @@ kombu = "*"
|
||||
psutil = ">=7.0.0"
|
||||
pytest-docker-tools = ">=3.1.3"
|
||||
redis = {version = "*", optional = true, markers = "extra == \"all\" or extra == \"redis\""}
|
||||
setuptools = {version = ">=75.8.0", markers = "python_version >= \"3.9\" and python_version < \"4.0\""}
|
||||
tenacity = ">=9.0.0"
|
||||
|
||||
[package.extras]
|
||||
all = ["boto3", "botocore", "python-memcached", "redis", "urllib3 (>=1.26.16,<2.0)"]
|
||||
all = ["boto3", "botocore", "pycurl (>=7.43) ; sys_platform != \"win32\" and platform_python_implementation == \"CPython\"", "python-memcached", "redis", "urllib3 (>=1.26.16,<2.0)"]
|
||||
memcached = ["python-memcached"]
|
||||
redis = ["redis"]
|
||||
sqs = ["boto3", "botocore", "urllib3 (>=1.26.16,<2.0)"]
|
||||
sqs = ["boto3", "botocore", "pycurl (>=7.43) ; sys_platform != \"win32\" and platform_python_implementation == \"CPython\"", "urllib3 (>=1.26.16,<2.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-cov"
|
||||
@@ -7859,14 +7854,14 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "reportlab"
|
||||
version = "4.4.9"
|
||||
version = "4.4.10"
|
||||
description = "The Reportlab Toolkit"
|
||||
optional = false
|
||||
python-versions = "<4,>=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "reportlab-4.4.9-py3-none-any.whl", hash = "sha256:68e2d103ae8041a37714e8896ec9b79a1c1e911d68c3bd2ea17546568cf17bfd"},
|
||||
{file = "reportlab-4.4.9.tar.gz", hash = "sha256:7cf487764294ee791a4781f5a157bebce262a666ae4bbb87786760a9676c9378"},
|
||||
{file = "reportlab-4.4.10-py3-none-any.whl", hash = "sha256:5abc815746ae2bc44e7ff25db96814f921349ca814c992c7eac3c26029bf7c24"},
|
||||
{file = "reportlab-4.4.10.tar.gz", hash = "sha256:5cbbb34ac3546039d0086deb2938cdec06b12da3cdb836e813258eb33cd28487"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -8179,10 +8174,10 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
botocore = ">=1.37.4,<2.0a.0"
|
||||
botocore = ">=1.37.4,<2.0a0"
|
||||
|
||||
[package.extras]
|
||||
crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"]
|
||||
crt = ["botocore[crt] (>=1.37.4,<2.0a0)"]
|
||||
|
||||
[[package]]
|
||||
name = "safety"
|
||||
@@ -8288,14 +8283,14 @@ contextlib2 = ">=0.5.5"
|
||||
|
||||
[[package]]
|
||||
name = "sentry-sdk"
|
||||
version = "2.51.0"
|
||||
version = "2.56.0"
|
||||
description = "Python client for Sentry (https://sentry.io)"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "sentry_sdk-2.51.0-py2.py3-none-any.whl", hash = "sha256:e21016d318a097c2b617bb980afd9fc737e1efc55f9b4f0cdc819982c9717d5f"},
|
||||
{file = "sentry_sdk-2.51.0.tar.gz", hash = "sha256:b89d64577075fd8c13088bc3609a2ce77a154e5beb8cba7cc16560b0539df4f7"},
|
||||
{file = "sentry_sdk-2.56.0-py2.py3-none-any.whl", hash = "sha256:5afafb744ceb91d22f4cc650c6bd048ac6af5f7412dcc6c59305a2e36f4dbc02"},
|
||||
{file = "sentry_sdk-2.56.0.tar.gz", hash = "sha256:fdab72030b69625665b2eeb9738bdde748ad254e8073085a0ce95382678e8168"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -8768,14 +8763,14 @@ test = ["pytest", "websockets"]
|
||||
|
||||
[[package]]
|
||||
name = "werkzeug"
|
||||
version = "3.1.6"
|
||||
version = "3.1.7"
|
||||
description = "The comprehensive WSGI web application library."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "werkzeug-3.1.6-py3-none-any.whl", hash = "sha256:7ddf3357bb9564e407607f988f683d72038551200c704012bb9a4c523d42f131"},
|
||||
{file = "werkzeug-3.1.6.tar.gz", hash = "sha256:210c6bede5a420a913956b4791a7f4d6843a43b6fcee4dfa08a65e93007d0d25"},
|
||||
{file = "werkzeug-3.1.7-py3-none-any.whl", hash = "sha256:4b314d81163a3e1a169b6a0be2a000a0e204e8873c5de6586f453c55688d422f"},
|
||||
{file = "werkzeug-3.1.7.tar.gz", hash = "sha256:fb8c01fe6ab13b9b7cdb46892b99b1d66754e1d7ab8e542e865ec13f526b5351"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -9377,4 +9372,4 @@ files = [
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.11,<3.13"
|
||||
content-hash = "2ed5b4e47d81da81963814f21702220ac5619f50cd605fd779be53c8c46ffca5"
|
||||
content-hash = "167d4549788b8bc8bb7772b9a81ade1eab73d8f354251a8d6af4901223cc7f67"
|
||||
|
||||
+20
-20
@@ -5,21 +5,21 @@ requires = ["poetry-core"]
|
||||
[project]
|
||||
authors = [{name = "Prowler Engineering", email = "engineering@prowler.com"}]
|
||||
dependencies = [
|
||||
"celery (>=5.4.0,<6.0.0)",
|
||||
"celery (==5.6.2)",
|
||||
"dj-rest-auth[with_social,jwt] (==7.0.1)",
|
||||
"django (==5.1.15)",
|
||||
"django-allauth[saml] (>=65.13.0,<66.0.0)",
|
||||
"django-celery-beat (>=2.7.0,<3.0.0)",
|
||||
"django-celery-results (>=2.5.1,<3.0.0)",
|
||||
"django-allauth[saml] (==65.15.0)",
|
||||
"django-celery-beat (==2.9.0)",
|
||||
"django-celery-results (==2.6.0)",
|
||||
"django-cors-headers==4.4.0",
|
||||
"django-environ==0.11.2",
|
||||
"django-filter==24.3",
|
||||
"django-guid==3.5.0",
|
||||
"django-postgres-extra (>=2.0.8,<3.0.0)",
|
||||
"django-postgres-extra (==2.0.9)",
|
||||
"djangorestframework==3.15.2",
|
||||
"djangorestframework-jsonapi==7.0.2",
|
||||
"djangorestframework-simplejwt (>=5.3.1,<6.0.0)",
|
||||
"drf-nested-routers (>=0.94.1,<1.0.0)",
|
||||
"djangorestframework-simplejwt (==5.5.1)",
|
||||
"drf-nested-routers (==0.95.0)",
|
||||
"drf-spectacular==0.27.2",
|
||||
"drf-spectacular-jsonapi==0.5.1",
|
||||
"defusedxml==0.7.1",
|
||||
@@ -27,22 +27,22 @@ dependencies = [
|
||||
"lxml==5.3.2",
|
||||
"prowler @ git+https://github.com/prowler-cloud/prowler.git@master",
|
||||
"psycopg2-binary==2.9.9",
|
||||
"pytest-celery[redis] (>=1.0.1,<2.0.0)",
|
||||
"sentry-sdk[django] (>=2.20.0,<3.0.0)",
|
||||
"pytest-celery[redis] (==1.3.0)",
|
||||
"sentry-sdk[django] (==2.56.0)",
|
||||
"uuid6==2024.7.10",
|
||||
"openai (>=1.82.0,<2.0.0)",
|
||||
"openai (==1.109.1)",
|
||||
"xmlsec==1.3.14",
|
||||
"h2 (==4.3.0)",
|
||||
"markdown (>=3.9,<4.0)",
|
||||
"markdown (==3.10.2)",
|
||||
"drf-simple-apikey (==2.2.1)",
|
||||
"matplotlib (>=3.10.6,<4.0.0)",
|
||||
"reportlab (>=4.4.4,<5.0.0)",
|
||||
"neo4j (>=6.0.0,<7.0.0)",
|
||||
"matplotlib (==3.10.8)",
|
||||
"reportlab (==4.4.10)",
|
||||
"neo4j (==6.1.0)",
|
||||
"cartography (==0.132.0)",
|
||||
"gevent (>=25.9.1,<26.0.0)",
|
||||
"werkzeug (>=3.1.4)",
|
||||
"sqlparse (>=0.5.4)",
|
||||
"fonttools (>=4.60.2)"
|
||||
"gevent (==25.9.1)",
|
||||
"werkzeug (==3.1.7)",
|
||||
"sqlparse (==0.5.5)",
|
||||
"fonttools (==4.62.1)"
|
||||
]
|
||||
description = "Prowler's API (Django/DRF)"
|
||||
license = "Apache-2.0"
|
||||
@@ -50,7 +50,7 @@ name = "prowler-api"
|
||||
package-mode = false
|
||||
# Needed for the SDK compatibility
|
||||
requires-python = ">=3.11,<3.13"
|
||||
version = "1.23.0"
|
||||
version = "1.24.0"
|
||||
|
||||
[project.scripts]
|
||||
celery = "src.backend.config.settings.celery"
|
||||
@@ -62,7 +62,7 @@ django-silk = "5.3.2"
|
||||
docker = "7.1.0"
|
||||
filelock = "3.20.3"
|
||||
freezegun = "1.5.1"
|
||||
marshmallow = ">=3.15.0,<4.0.0"
|
||||
marshmallow = "==3.26.2"
|
||||
mypy = "1.10.1"
|
||||
pylint = "3.2.5"
|
||||
pytest = "8.2.2"
|
||||
|
||||
@@ -0,0 +1,170 @@
|
||||
"""
|
||||
Cypher sanitizer for custom (user-supplied) Attack Paths queries.
|
||||
|
||||
Two responsibilities:
|
||||
|
||||
1. **Validation** - reject queries containing SSRF or dangerous procedure
|
||||
patterns (defense-in-depth; the primary control is ``neo4j.READ_ACCESS``).
|
||||
|
||||
2. **Provider-scoped label injection** - inject a dynamic
|
||||
``_Provider_{uuid}`` label into every node pattern so the database can
|
||||
use its native label index for provider isolation.

Label-injection pipeline:

1. **Protect** string literals and line comments (placeholder replacement).
2. **Split** by top-level clause keywords to track clause context.
3. **Pass A** - inject into *labeled* node patterns in ALL segments.
4. **Pass B** - inject into *bare* node patterns in MATCH segments only.
5. **Restore** protected regions.
"""

import re

from rest_framework.exceptions import ValidationError

from tasks.jobs.attack_paths.config import get_provider_label


# Step 1 - String / comment protection
# Single combined regex: strings first, then line comments.
# The regex engine finds the leftmost match, so a string like 'https://prowler.com'
# is consumed as a string before the // inside it can match as a comment.
_PROTECTED_RE = re.compile(r"'(?:[^'\\]|\\.)*'|\"(?:[^\"\\]|\\.)*\"|//[^\n]*")

# Step 2 - Clause splitting
# OPTIONAL MATCH must come before MATCH to avoid partial matching.
_CLAUSE_RE = re.compile(
    r"\b(OPTIONAL\s+MATCH|MATCH|WHERE|RETURN|WITH|ORDER\s+BY"
    r"|SKIP|LIMIT|UNION|UNWIND|CALL)\b",
    re.IGNORECASE,
)

# Pass A - Labeled node patterns (all segments)
# Matches node patterns that have at least one :Label.
# (?<!\w)\( - open paren NOT preceded by a word char (excludes function calls).
# Group 1: optional variable + one or more :Label
# Group 2: optional {properties} + closing paren
_LABELED_NODE_RE = re.compile(
    r"(?<!\w)\("
    r"("
    r"\s*(?:[a-zA-Z_]\w*)?"
    r"(?:\s*:\s*(?:`[^`]*`|[a-zA-Z_]\w*))+"
    r")"
    r"("
    r"\s*(?:\{[^}]*\})?"
    r"\s*\)"
    r")"
)

# Pass B - Bare node patterns (MATCH segments only)
# Matches (identifier) or (identifier {properties}) without any :Label.
# Only applied in MATCH/OPTIONAL MATCH segments.
_BARE_NODE_RE = re.compile(
    r"(?<!\w)\(" r"(\s*[a-zA-Z_]\w*)" r"(\s*(?:\{[^}]*\})?)" r"\s*\)"
)

_MATCH_CLAUSES = frozenset({"MATCH", "OPTIONAL MATCH"})


def _inject_labeled(segment: str, label: str) -> str:
    """Inject provider label into all node patterns that have existing labels."""
    return _LABELED_NODE_RE.sub(rf"(\1:{label}\2", segment)


def _inject_bare(segment: str, label: str) -> str:
    """Inject provider label into bare `(identifier)` node patterns."""

    def _replace(match):
        var = match.group(1)
        props = match.group(2).strip()
        if props:
            return f"({var}:{label} {props})"
        return f"({var}:{label})"

    return _BARE_NODE_RE.sub(_replace, segment)


def inject_provider_label(cypher: str, provider_id: str) -> str:
    """Rewrite a Cypher query to scope every node pattern to a provider.

    Args:
        cypher: The original Cypher query string.
        provider_id: The provider UUID (will be converted to a label via
            `get_provider_label`).

    Returns:
        The rewritten Cypher with `:_Provider_{uuid}` appended to every
        node pattern.
    """
    label = get_provider_label(provider_id)

    # Step 1: Protect strings and comments (single pass, leftmost-first)
    protected: list[str] = []

    def _save(match):
        protected.append(match.group(0))
        return f"\x00P{len(protected) - 1}\x00"

    work = _PROTECTED_RE.sub(_save, cypher)

    # Step 2: Split by clause keywords
    parts = _CLAUSE_RE.split(work)

    # Steps 3-4: Apply injection passes per segment
    result: list[str] = []
    current_clause: str | None = None

    for i, part in enumerate(parts):
        if i % 2 == 1:
            # Keyword token - normalize for clause tracking
            current_clause = re.sub(r"\s+", " ", part.strip()).upper()
            result.append(part)
        else:
            # Content segment - apply injection based on clause context
            part = _inject_labeled(part, label)
            if current_clause in _MATCH_CLAUSES:
                part = _inject_bare(part, label)
            result.append(part)

    work = "".join(result)

    # Step 5: Restore protected regions
    for i, original in enumerate(protected):
        work = work.replace(f"\x00P{i}\x00", original)

    return work


# ---------------------------------------------------------------------------
# Validation
# ---------------------------------------------------------------------------

# Patterns that indicate SSRF or dangerous procedure calls
# Defense-in-depth layer - the primary control is `neo4j.READ_ACCESS`
_BLOCKED_PATTERNS = [
    re.compile(r"\bLOAD\s+CSV\b", re.IGNORECASE),
    re.compile(r"\bapoc\.load\b", re.IGNORECASE),
    re.compile(r"\bapoc\.import\b", re.IGNORECASE),
    re.compile(r"\bapoc\.export\b", re.IGNORECASE),
    re.compile(r"\bapoc\.cypher\b", re.IGNORECASE),
    re.compile(r"\bapoc\.systemdb\b", re.IGNORECASE),
    re.compile(r"\bapoc\.config\b", re.IGNORECASE),
    re.compile(r"\bapoc\.periodic\b", re.IGNORECASE),
    re.compile(r"\bapoc\.do\b", re.IGNORECASE),
    re.compile(r"\bapoc\.trigger\b", re.IGNORECASE),
    re.compile(r"\bapoc\.custom\b", re.IGNORECASE),
]


def validate_custom_query(cypher: str) -> None:
    """Reject queries containing known SSRF or dangerous procedure patterns.

    Raises ValidationError if a blocked pattern is found.
    String literals and comments are stripped before matching to avoid
    false positives.
    """
    stripped = _PROTECTED_RE.sub("", cypher)
    for pattern in _BLOCKED_PATTERNS:
        if pattern.search(stripped):
            raise ValidationError({"query": "Query contains a blocked operation"})
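For illustration, a minimal sketch of how the module above is expected to be used. The query, the UUID, and the exact spelling of the generated label are invented for the example; only `inject_provider_label` and `validate_custom_query` come from the code above.

# Hypothetical usage of the sanitizer (illustrative values only)
query = "MATCH (i:EC2Instance)-->(b) WHERE i.exposed_internet = true RETURN i, b"
scoped = inject_provider_label(query, "3fa85f64-5717-4562-b3fc-2c963f66afa6")
# Pass A extends the labeled pattern (i:EC2Instance) and Pass B extends the bare
# pattern (b), since both sit inside a MATCH segment. The result looks roughly like:
#   MATCH (i:EC2Instance:_Provider_...)-->(b:_Provider_...) WHERE ... RETURN i, b
validate_custom_query("LOAD CSV FROM 'https://example.com/x.csv' AS row RETURN row")
# Raises ValidationError because LOAD CSV is in _BLOCKED_PATTERNS.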
@@ -11,8 +11,8 @@ from config.env import env
from django.conf import settings
from tasks.jobs.attack_paths.config import (
    BATCH_SIZE,
    PROVIDER_ID_PROPERTY,
    PROVIDER_RESOURCE_LABEL,
    get_provider_label,
)

from api.attack_paths.retryable_session import RetryableSession
@@ -163,11 +163,8 @@ def drop_subgraph(database: str, provider_id: str) -> int:
    Uses batched deletion to avoid memory issues with large graphs.
    Silently returns 0 if the database doesn't exist.
    """
    provider_label = get_provider_label(provider_id)
    deleted_nodes = 0
    parameters = {
        "provider_id": provider_id,
        "batch_size": BATCH_SIZE,
    }

    try:
        with get_session(database) as session:
@@ -175,12 +172,12 @@ def drop_subgraph(database: str, provider_id: str) -> int:
            while deleted_count > 0:
                result = session.run(
                    f"""
                    MATCH (n:{PROVIDER_RESOURCE_LABEL} {{{PROVIDER_ID_PROPERTY}: $provider_id}})
                    MATCH (n:{PROVIDER_RESOURCE_LABEL}:`{provider_label}`)
                    WITH n LIMIT $batch_size
                    DETACH DELETE n
                    RETURN COUNT(n) AS deleted_nodes_count
                    """,
                    parameters,
                    {"batch_size": BATCH_SIZE},
                )
                deleted_count = result.single().get("deleted_nodes_count", 0)
                deleted_nodes += deleted_count
@@ -199,15 +196,12 @@ def has_provider_data(database: str, provider_id: str) -> bool:

    Returns `False` if the database doesn't exist.
    """
    query = (
        f"MATCH (n:{PROVIDER_RESOURCE_LABEL} "
        f"{{{PROVIDER_ID_PROPERTY}: $provider_id}}) "
        "RETURN 1 LIMIT 1"
    )
    provider_label = get_provider_label(provider_id)
    query = f"MATCH (n:{PROVIDER_RESOURCE_LABEL}:`{provider_label}`) RETURN 1 LIMIT 1"

    try:
        with get_session(database, default_access_mode=neo4j.READ_ACCESS) as session:
            result = session.run(query, {"provider_id": provider_id})
            result = session.run(query)
            return result.single() is not None

    except GraphDatabaseQueryException as exc:
@@ -3,7 +3,7 @@ from api.attack_paths.queries.types import (
|
||||
AttackPathsQueryDefinition,
|
||||
AttackPathsQueryParameterDefinition,
|
||||
)
|
||||
from tasks.jobs.attack_paths.config import PROVIDER_ID_PROPERTY, PROWLER_FINDING_LABEL
|
||||
from tasks.jobs.attack_paths.config import PROWLER_FINDING_LABEL
|
||||
|
||||
|
||||
# Custom Attack Path Queries
|
||||
@@ -16,8 +16,6 @@ AWS_INTERNET_EXPOSED_EC2_SENSITIVE_S3_ACCESS = AttackPathsQueryDefinition(
|
||||
description="Detect EC2 instances with SSH exposed to the internet that can assume higher-privileged roles to read tagged sensitive S3 buckets despite bucket-level public access blocks.",
|
||||
provider="aws",
|
||||
cypher=f"""
|
||||
OPTIONAL MATCH (internet:Internet {{{PROVIDER_ID_PROPERTY}: $provider_id}})
|
||||
|
||||
MATCH path_s3 = (aws:AWSAccount {{id: $provider_uid}})--(s3:S3Bucket)--(t:AWSTag)
|
||||
WHERE toLower(t.key) = toLower($tag_key) AND toLower(t.value) = toLower($tag_value)
|
||||
|
||||
@@ -31,7 +29,7 @@ AWS_INTERNET_EXPOSED_EC2_SENSITIVE_S3_ACCESS = AttackPathsQueryDefinition(
|
||||
|
||||
MATCH path_assume_role = (ec2)-[p:STS_ASSUMEROLE_ALLOW*1..9]-(r:AWSRole)
|
||||
|
||||
OPTIONAL MATCH (internet)-[can_access:CAN_ACCESS]->(ec2)
|
||||
OPTIONAL MATCH (internet:Internet)-[can_access:CAN_ACCESS]->(ec2)
|
||||
|
||||
WITH collect(path_s3) + collect(path_ec2) + collect(path_role) + collect(path_assume_role) AS paths,
|
||||
head(collect(internet)) AS internet, collect(can_access) AS can_access
|
||||
@@ -40,7 +38,7 @@ AWS_INTERNET_EXPOSED_EC2_SENSITIVE_S3_ACCESS = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, internet, can_access, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
|
||||
""",
|
||||
@@ -79,7 +77,7 @@ AWS_RDS_INSTANCES = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -102,7 +100,7 @@ AWS_RDS_UNENCRYPTED_STORAGE = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -125,7 +123,7 @@ AWS_S3_ANONYMOUS_ACCESS_BUCKETS = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -149,7 +147,7 @@ AWS_IAM_STATEMENTS_ALLOW_ALL_ACTIONS = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -173,7 +171,7 @@ AWS_IAM_STATEMENTS_ALLOW_DELETE_POLICY = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -197,7 +195,7 @@ AWS_IAM_STATEMENTS_ALLOW_CREATE_ACTIONS = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -215,12 +213,10 @@ AWS_EC2_INSTANCES_INTERNET_EXPOSED = AttackPathsQueryDefinition(
|
||||
description="Find EC2 instances flagged as exposed to the internet within the selected account.",
|
||||
provider="aws",
|
||||
cypher=f"""
|
||||
OPTIONAL MATCH (internet:Internet {{{PROVIDER_ID_PROPERTY}: $provider_id}})
|
||||
|
||||
MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(ec2:EC2Instance)
|
||||
WHERE ec2.exposed_internet = true
|
||||
|
||||
OPTIONAL MATCH (internet)-[can_access:CAN_ACCESS]->(ec2)
|
||||
OPTIONAL MATCH (internet:Internet)-[can_access:CAN_ACCESS]->(ec2)
|
||||
|
||||
WITH collect(path) AS paths, head(collect(internet)) AS internet, collect(can_access) AS can_access
|
||||
UNWIND paths AS p
|
||||
@@ -228,7 +224,7 @@ AWS_EC2_INSTANCES_INTERNET_EXPOSED = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, internet, can_access, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
|
||||
""",
|
||||
@@ -242,13 +238,11 @@ AWS_SECURITY_GROUPS_OPEN_INTERNET_FACING = AttackPathsQueryDefinition(
|
||||
description="Find internet-facing resources associated with security groups that allow inbound access from '0.0.0.0/0'.",
|
||||
provider="aws",
|
||||
cypher=f"""
|
||||
OPTIONAL MATCH (internet:Internet {{{PROVIDER_ID_PROPERTY}: $provider_id}})
|
||||
|
||||
MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(ec2:EC2Instance)--(sg:EC2SecurityGroup)--(ipi:IpPermissionInbound)--(ir:IpRange)
|
||||
WHERE ec2.exposed_internet = true
|
||||
AND ir.range = "0.0.0.0/0"
|
||||
|
||||
OPTIONAL MATCH (internet)-[can_access:CAN_ACCESS]->(ec2)
|
||||
OPTIONAL MATCH (internet:Internet)-[can_access:CAN_ACCESS]->(ec2)
|
||||
|
||||
WITH collect(path) AS paths, head(collect(internet)) AS internet, collect(can_access) AS can_access
|
||||
UNWIND paths AS p
|
||||
@@ -256,7 +250,7 @@ AWS_SECURITY_GROUPS_OPEN_INTERNET_FACING = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, internet, can_access, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
|
||||
""",
|
||||
@@ -270,12 +264,10 @@ AWS_CLASSIC_ELB_INTERNET_EXPOSED = AttackPathsQueryDefinition(
|
||||
description="Find Classic Load Balancers exposed to the internet along with their listeners.",
|
||||
provider="aws",
|
||||
cypher=f"""
|
||||
OPTIONAL MATCH (internet:Internet {{{PROVIDER_ID_PROPERTY}: $provider_id}})
|
||||
|
||||
MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(elb:LoadBalancer)--(listener:ELBListener)
|
||||
WHERE elb.exposed_internet = true
|
||||
|
||||
OPTIONAL MATCH (internet)-[can_access:CAN_ACCESS]->(elb)
|
||||
OPTIONAL MATCH (internet:Internet)-[can_access:CAN_ACCESS]->(elb)
|
||||
|
||||
WITH collect(path) AS paths, head(collect(internet)) AS internet, collect(can_access) AS can_access
|
||||
UNWIND paths AS p
|
||||
@@ -283,7 +275,7 @@ AWS_CLASSIC_ELB_INTERNET_EXPOSED = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, internet, can_access, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
|
||||
""",
|
||||
@@ -297,12 +289,10 @@ AWS_ELBV2_INTERNET_EXPOSED = AttackPathsQueryDefinition(
|
||||
description="Find ELBv2 load balancers exposed to the internet along with their listeners.",
|
||||
provider="aws",
|
||||
cypher=f"""
|
||||
OPTIONAL MATCH (internet:Internet {{{PROVIDER_ID_PROPERTY}: $provider_id}})
|
||||
|
||||
MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(elbv2:LoadBalancerV2)--(listener:ELBV2Listener)
|
||||
WHERE elbv2.exposed_internet = true
|
||||
|
||||
OPTIONAL MATCH (internet)-[can_access:CAN_ACCESS]->(elbv2)
|
||||
OPTIONAL MATCH (internet:Internet)-[can_access:CAN_ACCESS]->(elbv2)
|
||||
|
||||
WITH collect(path) AS paths, head(collect(internet)) AS internet, collect(can_access) AS can_access
|
||||
UNWIND paths AS p
|
||||
@@ -310,7 +300,7 @@ AWS_ELBV2_INTERNET_EXPOSED = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, internet, can_access, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
|
||||
""",
|
||||
@@ -324,15 +314,13 @@ AWS_PUBLIC_IP_RESOURCE_LOOKUP = AttackPathsQueryDefinition(
|
||||
description="Given a public IP address, find the related AWS resource and its adjacent node within the selected account.",
|
||||
provider="aws",
|
||||
cypher=f"""
|
||||
OPTIONAL MATCH (internet:Internet {{{PROVIDER_ID_PROPERTY}: $provider_id}})
|
||||
|
||||
MATCH path = (aws:AWSAccount {{id: $provider_uid}})-[r]-(x)-[q]-(y)
|
||||
WHERE (x:EC2PrivateIp AND x.public_ip = $ip)
|
||||
OR (x:EC2Instance AND x.publicipaddress = $ip)
|
||||
OR (x:NetworkInterface AND x.public_ip = $ip)
|
||||
OR (x:ElasticIPAddress AND x.public_ip = $ip)
|
||||
|
||||
OPTIONAL MATCH (internet)-[can_access:CAN_ACCESS]->(x)
|
||||
OPTIONAL MATCH (internet:Internet)-[can_access:CAN_ACCESS]->(x)
|
||||
|
||||
WITH collect(path) AS paths, head(collect(internet)) AS internet, collect(can_access) AS can_access
|
||||
UNWIND paths AS p
|
||||
@@ -340,7 +328,7 @@ AWS_PUBLIC_IP_RESOURCE_LOOKUP = AttackPathsQueryDefinition(
|
||||
|
||||
WITH paths, internet, can_access, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
|
||||
""",
|
||||
@@ -403,7 +391,7 @@ AWS_APPRUNNER_PRIVESC_PASSROLE_CREATE_SERVICE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -441,7 +429,7 @@ AWS_APPRUNNER_PRIVESC_UPDATE_SERVICE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -511,7 +499,7 @@ AWS_BEDROCK_PRIVESC_PASSROLE_CODE_INTERPRETER = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -558,7 +546,7 @@ AWS_BEDROCK_PRIVESC_INVOKE_CODE_INTERPRETER = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -610,7 +598,7 @@ AWS_CLOUDFORMATION_PRIVESC_PASSROLE_CREATE_STACK = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -648,7 +636,7 @@ AWS_CLOUDFORMATION_PRIVESC_UPDATE_STACK = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -709,7 +697,7 @@ AWS_CLOUDFORMATION_PRIVESC_PASSROLE_CREATE_STACKSET = AttackPathsQueryDefinition
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -761,7 +749,7 @@ AWS_CLOUDFORMATION_PRIVESC_PASSROLE_UPDATE_STACKSET = AttackPathsQueryDefinition
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -808,7 +796,7 @@ AWS_CLOUDFORMATION_PRIVESC_CHANGESET = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -869,7 +857,7 @@ AWS_CODEBUILD_PRIVESC_PASSROLE_CREATE_PROJECT = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -907,7 +895,7 @@ AWS_CODEBUILD_PRIVESC_START_BUILD = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -945,7 +933,7 @@ AWS_CODEBUILD_PRIVESC_START_BUILD_BATCH = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1006,7 +994,7 @@ AWS_CODEBUILD_PRIVESC_PASSROLE_CREATE_PROJECT_BATCH = AttackPathsQueryDefinition
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1077,7 +1065,7 @@ AWS_DATAPIPELINE_PRIVESC_PASSROLE_CREATE_PIPELINE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1129,7 +1117,7 @@ AWS_EC2_PRIVESC_PASSROLE_IAM = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1185,7 +1173,7 @@ AWS_EC2_PRIVESC_MODIFY_INSTANCE_ATTRIBUTE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1237,7 +1225,7 @@ AWS_EC2_PRIVESC_PASSROLE_SPOT_INSTANCES = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1284,7 +1272,7 @@ AWS_EC2_PRIVESC_LAUNCH_TEMPLATE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1322,7 +1310,7 @@ AWS_EC2INSTANCECONNECT_PRIVESC_SEND_SSH_PUBLIC_KEY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1392,7 +1380,7 @@ AWS_ECS_PRIVESC_PASSROLE_CREATE_SERVICE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1462,7 +1450,7 @@ AWS_ECS_PRIVESC_PASSROLE_RUN_TASK = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1523,7 +1511,7 @@ AWS_ECS_PRIVESC_PASSROLE_CREATE_SERVICE_EXISTING_CLUSTER = AttackPathsQueryDefin
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1584,7 +1572,7 @@ AWS_ECS_PRIVESC_PASSROLE_RUN_TASK_EXISTING_CLUSTER = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1645,7 +1633,7 @@ AWS_ECS_PRIVESC_PASSROLE_START_TASK_EXISTING_CLUSTER = AttackPathsQueryDefinitio
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1692,7 +1680,7 @@ AWS_ECS_PRIVESC_EXECUTE_COMMAND = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1744,7 +1732,7 @@ AWS_GLUE_PRIVESC_PASSROLE_DEV_ENDPOINT = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1782,7 +1770,7 @@ AWS_GLUE_PRIVESC_UPDATE_DEV_ENDPOINT = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1843,7 +1831,7 @@ AWS_GLUE_PRIVESC_PASSROLE_CREATE_JOB = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1904,7 +1892,7 @@ AWS_GLUE_PRIVESC_PASSROLE_CREATE_JOB_TRIGGER = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -1965,7 +1953,7 @@ AWS_GLUE_PRIVESC_PASSROLE_UPDATE_JOB = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2026,7 +2014,7 @@ AWS_GLUE_PRIVESC_PASSROLE_UPDATE_JOB_TRIGGER = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2069,7 +2057,7 @@ AWS_IAM_PRIVESC_CREATE_POLICY_VERSION = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2112,7 +2100,7 @@ AWS_IAM_PRIVESC_CREATE_ACCESS_KEY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2169,7 +2157,7 @@ AWS_IAM_PRIVESC_DELETE_CREATE_ACCESS_KEY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2212,7 +2200,7 @@ AWS_IAM_PRIVESC_CREATE_LOGIN_PROFILE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2252,7 +2240,7 @@ AWS_IAM_PRIVESC_PUT_ROLE_POLICY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2295,7 +2283,7 @@ AWS_IAM_PRIVESC_UPDATE_LOGIN_PROFILE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2335,7 +2323,7 @@ AWS_IAM_PRIVESC_PUT_USER_POLICY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2375,7 +2363,7 @@ AWS_IAM_PRIVESC_ATTACH_USER_POLICY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2415,7 +2403,7 @@ AWS_IAM_PRIVESC_ATTACH_ROLE_POLICY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2458,7 +2446,7 @@ AWS_IAM_PRIVESC_ATTACH_GROUP_POLICY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2501,7 +2489,7 @@ AWS_IAM_PRIVESC_PUT_GROUP_POLICY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2544,7 +2532,7 @@ AWS_IAM_PRIVESC_UPDATE_ASSUME_ROLE_POLICY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2587,7 +2575,7 @@ AWS_IAM_PRIVESC_ADD_USER_TO_GROUP = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2630,7 +2618,7 @@ AWS_IAM_PRIVESC_ATTACH_ROLE_POLICY_ASSUME_ROLE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2687,7 +2675,7 @@ AWS_IAM_PRIVESC_ATTACH_USER_POLICY_CREATE_ACCESS_KEY = AttackPathsQueryDefinitio
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2731,7 +2719,7 @@ AWS_IAM_PRIVESC_CREATE_POLICY_VERSION_ASSUME_ROLE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2774,7 +2762,7 @@ AWS_IAM_PRIVESC_PUT_ROLE_POLICY_ASSUME_ROLE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2831,7 +2819,7 @@ AWS_IAM_PRIVESC_PUT_USER_POLICY_CREATE_ACCESS_KEY = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2888,7 +2876,7 @@ AWS_IAM_PRIVESC_ATTACH_ROLE_POLICY_UPDATE_ASSUME_ROLE = AttackPathsQueryDefiniti
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -2946,7 +2934,7 @@ AWS_IAM_PRIVESC_CREATE_POLICY_VERSION_UPDATE_ASSUME_ROLE = AttackPathsQueryDefin
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3003,7 +2991,7 @@ AWS_IAM_PRIVESC_PUT_ROLE_POLICY_UPDATE_ASSUME_ROLE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3064,7 +3052,7 @@ AWS_LAMBDA_PRIVESC_PASSROLE_CREATE_FUNCTION = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3125,7 +3113,7 @@ AWS_LAMBDA_PRIVESC_PASSROLE_CREATE_FUNCTION_EVENT_SOURCE = AttackPathsQueryDefin
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3168,7 +3156,7 @@ AWS_LAMBDA_PRIVESC_UPDATE_FUNCTION_CODE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3225,7 +3213,7 @@ AWS_LAMBDA_PRIVESC_UPDATE_FUNCTION_CODE_INVOKE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3282,7 +3270,7 @@ AWS_LAMBDA_PRIVESC_UPDATE_FUNCTION_CODE_ADD_PERMISSION = AttackPathsQueryDefinit
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3343,7 +3331,7 @@ AWS_LAMBDA_PRIVESC_PASSROLE_CREATE_FUNCTION_ADD_PERMISSION = AttackPathsQueryDef
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3395,7 +3383,7 @@ AWS_SAGEMAKER_PRIVESC_PASSROLE_CREATE_NOTEBOOK = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3447,7 +3435,7 @@ AWS_SAGEMAKER_PRIVESC_PASSROLE_CREATE_TRAINING_JOB = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3499,7 +3487,7 @@ AWS_SAGEMAKER_PRIVESC_PASSROLE_CREATE_PROCESSING_JOB = AttackPathsQueryDefinitio
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3542,7 +3530,7 @@ AWS_SAGEMAKER_PRIVESC_PRESIGNED_NOTEBOOK_URL = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3612,7 +3600,7 @@ AWS_SAGEMAKER_PRIVESC_LIFECYCLE_CONFIG_NOTEBOOK = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3650,7 +3638,7 @@ AWS_SSM_PRIVESC_START_SESSION = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3688,7 +3676,7 @@ AWS_SSM_PRIVESC_SEND_COMMAND = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
@@ -3731,7 +3719,7 @@ AWS_STS_PRIVESC_ASSUME_ROLE = AttackPathsQueryDefinition(
|
||||
WITH paths, collect(DISTINCT n) AS unique_nodes
|
||||
UNWIND unique_nodes AS n
|
||||
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL'}})
|
||||
|
||||
RETURN paths, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
|
||||
@@ -1,13 +1,18 @@
from tasks.jobs.attack_paths.config import PROVIDER_ID_PROPERTY, PROVIDER_RESOURCE_LABEL
from tasks.jobs.attack_paths.config import PROVIDER_RESOURCE_LABEL, get_provider_label


def get_cartography_schema_query(provider_id: str) -> str:
    """Build the Cartography schema metadata query scoped to a provider label."""
    provider_label = get_provider_label(provider_id)
    return f"""
    MATCH (n:{PROVIDER_RESOURCE_LABEL}:`{provider_label}`)
    WHERE n._module_name STARTS WITH 'cartography:'
        AND NOT n._module_name IN ['cartography:ontology', 'cartography:prowler']
        AND n._module_version IS NOT NULL
    RETURN n._module_name AS module_name, n._module_version AS module_version
    LIMIT 1
    """

CARTOGRAPHY_SCHEMA_METADATA = f"""
MATCH (n:{PROVIDER_RESOURCE_LABEL} {{{PROVIDER_ID_PROPERTY}: $provider_id}})
WHERE n._module_name STARTS WITH 'cartography:'
    AND NOT n._module_name IN ['cartography:ontology', 'cartography:prowler']
    AND n._module_version IS NOT NULL
RETURN n._module_name AS module_name, n._module_version AS module_version
LIMIT 1
"""

GITHUB_SCHEMA_URL = (
    "https://github.com/cartography-cncf/cartography/blob/"
@@ -1,22 +1,26 @@
import logging
import re

from typing import Any, Iterable

import neo4j

from rest_framework.exceptions import APIException, PermissionDenied, ValidationError

from api.attack_paths import database as graph_database, AttackPathsQueryDefinition
from api.attack_paths.cypher_sanitizer import (
    inject_provider_label,
    validate_custom_query,
)
from api.attack_paths.queries.schema import (
    CARTOGRAPHY_SCHEMA_METADATA,
    GITHUB_SCHEMA_URL,
    RAW_SCHEMA_URL,
    get_cartography_schema_query,
)
from config.custom_logging import BackendLogger
from tasks.jobs.attack_paths.config import (
    INTERNAL_LABELS,
    INTERNAL_PROPERTIES,
    PROVIDER_ID_PROPERTY,
    get_provider_label,
    is_dynamic_isolation_label,
)

@@ -72,7 +76,6 @@ def prepare_parameters(

    clean_parameters = {
        "provider_uid": str(provider_uid),
        "provider_id": str(provider_id),
    }

    for definition_parameter in definition.parameters:
@@ -123,38 +126,6 @@ def execute_query(

# Custom query helpers

# Patterns that indicate SSRF or dangerous procedure calls
# Defense-in-depth layer - the primary control is `neo4j.READ_ACCESS`
_BLOCKED_PATTERNS = [
    re.compile(r"\bLOAD\s+CSV\b", re.IGNORECASE),
    re.compile(r"\bapoc\.load\b", re.IGNORECASE),
    re.compile(r"\bapoc\.import\b", re.IGNORECASE),
    re.compile(r"\bapoc\.export\b", re.IGNORECASE),
    re.compile(r"\bapoc\.cypher\b", re.IGNORECASE),
    re.compile(r"\bapoc\.systemdb\b", re.IGNORECASE),
    re.compile(r"\bapoc\.config\b", re.IGNORECASE),
    re.compile(r"\bapoc\.periodic\b", re.IGNORECASE),
    re.compile(r"\bapoc\.do\b", re.IGNORECASE),
    re.compile(r"\bapoc\.trigger\b", re.IGNORECASE),
    re.compile(r"\bapoc\.custom\b", re.IGNORECASE),
]

# Strip string literals so patterns inside quotes don't cause false positives
# Handles escaped quotes (\' and \") inside strings
_STRING_LITERALS = re.compile(r"'(?:[^'\\]|\\.)*'|\"(?:[^\"\\]|\\.)*\"")


def validate_custom_query(cypher: str) -> None:
    """Reject queries containing known SSRF or dangerous procedure patterns.

    Raises ValidationError if a blocked pattern is found.
    String literals are stripped before matching to avoid false positives.
    """
    stripped = _STRING_LITERALS.sub("", cypher)
    for pattern in _BLOCKED_PATTERNS:
        if pattern.search(stripped):
            raise ValidationError({"query": "Query contains a blocked operation"})


def normalize_custom_query_payload(raw_data):
    if not isinstance(raw_data, dict):
@@ -173,7 +144,15 @@ def execute_custom_query(
    cypher: str,
    provider_id: str,
) -> dict[str, Any]:
    # Defense-in-depth for custom queries:
    # 1. neo4j.READ_ACCESS — prevents mutations at the driver level
    # 2. inject_provider_label() — regex-based label injection scopes node patterns
    # 3. _serialize_graph() — post-query filter drops nodes without the provider label
    #
    # Layer 2 is best-effort (regex can't fully parse Cypher);
    # layer 3 is the safety net that guarantees provider isolation.
    validate_custom_query(cypher)
    cypher = inject_provider_label(cypher, provider_id)

    try:
        graph = graph_database.execute_read_query(
@@ -208,10 +187,7 @@ def get_cartography_schema(
        with graph_database.get_session(
            database_name, default_access_mode=neo4j.READ_ACCESS
        ) as session:
            result = session.run(
                CARTOGRAPHY_SCHEMA_METADATA,
                {"provider_id": provider_id},
            )
            result = session.run(get_cartography_schema_query(provider_id))
            record = result.single()
    except graph_database.GraphDatabaseQueryException as exc:
        logger.error(f"Cartography schema query failed: {exc}")
@@ -255,10 +231,12 @@ def _truncate_graph(graph: dict[str, Any]) -> dict[str, Any]:


def _serialize_graph(graph, provider_id: str) -> dict[str, Any]:
    provider_label = get_provider_label(provider_id)

    nodes = []
    kept_node_ids = set()
    for node in graph.nodes:
        if node._properties.get(PROVIDER_ID_PROPERTY) != provider_id:
        if provider_label not in node.labels:
            continue

        kept_node_ids.add(node.element_id)
@@ -273,14 +251,11 @@ def _serialize_graph(graph, provider_id: str) -> dict[str, Any]:
    filtered_count = len(graph.nodes) - len(nodes)
    if filtered_count > 0:
        logger.debug(
            f"Filtered {filtered_count} nodes without matching provider_id={provider_id}"
            f"Filtered {filtered_count} nodes without provider label {provider_label}"
        )

    relationships = []
    for relationship in graph.relationships:
        if relationship._properties.get(PROVIDER_ID_PROPERTY) != provider_id:
            continue

        if (
            relationship.start_node.element_id not in kept_node_ids
            or relationship.end_node.element_id not in kept_node_ids
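To make the layering concrete, here is a rough sketch of how the pieces compose for a custom query. It is illustrative only: session handling is simplified compared to `execute_custom_query`, and the function name and database argument are placeholders.

# Hedged sketch of the defense-in-depth composition (not the real implementation)
def run_scoped_custom_query(cypher: str, provider_id: str, database_name: str):
    validate_custom_query(cypher)  # reject LOAD CSV / apoc.* procedures up front
    scoped = inject_provider_label(cypher, provider_id)  # layer 2: best-effort label scoping
    with graph_database.get_session(
        database_name, default_access_mode=neo4j.READ_ACCESS  # layer 1: read-only session
    ) as session:
        graph = session.run(scoped).graph()
    return _serialize_graph(graph, provider_id)  # layer 3: drop out-of-scope nodes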
@@ -15,6 +15,7 @@ from django_filters.rest_framework import (
from rest_framework_json_api.django_filters.backends import DjangoFilterBackend
from rest_framework_json_api.serializers import ValidationError

from api.constants import SEVERITY_ORDER
from api.db_utils import (
    FindingDeltaEnumField,
    InvitationStateEnumField,
@@ -43,6 +44,7 @@ from api.models import (
    ProviderGroup,
    ProviderSecret,
    Resource,
    ResourceFindingMapping,
    ResourceTag,
    Role,
    Scan,
@@ -196,17 +198,13 @@ class CommonFindingFilters(FilterSet):
        field_name="resource_services", lookup_expr="icontains"
    )

    resource_uid = CharFilter(field_name="resources__uid")
    resource_uid__in = CharInFilter(field_name="resources__uid", lookup_expr="in")
    resource_uid__icontains = CharFilter(
        field_name="resources__uid", lookup_expr="icontains"
    )
    resource_uid = CharFilter(method="filter_resource_uid")
    resource_uid__in = CharInFilter(method="filter_resource_uid_in")
    resource_uid__icontains = CharFilter(method="filter_resource_uid_icontains")

    resource_name = CharFilter(field_name="resources__name")
    resource_name__in = CharInFilter(field_name="resources__name", lookup_expr="in")
    resource_name__icontains = CharFilter(
        field_name="resources__name", lookup_expr="icontains"
    )
    resource_name = CharFilter(method="filter_resource_name")
    resource_name__in = CharInFilter(method="filter_resource_name_in")
    resource_name__icontains = CharFilter(method="filter_resource_name_icontains")

    resource_type = CharFilter(method="filter_resource_type")
    resource_type__in = CharInFilter(field_name="resource_types", lookup_expr="overlap")
@@ -264,6 +262,52 @@ class CommonFindingFilters(FilterSet):
        )
        return queryset.filter(overall_query).distinct()

    def filter_check_title_icontains(self, queryset, name, value):
        # Resolve from the summary table (has check_title column + trigram
        # GIN index) instead of scanning JSON in the findings table.
        matching_check_ids = (
            FindingGroupDailySummary.objects.filter(
                check_title__icontains=value,
            )
            .values_list("check_id", flat=True)
            .distinct()
        )
        return queryset.filter(check_id__in=matching_check_ids)

    # --- Resource subquery filters ---
    # Resolve resource → RFM → finding_ids first, then filter findings
    # by id__in. This avoids a 3-way JOIN driven from the (huge)
    # findings side and lets PostgreSQL start from the resources
    # unique-constraint index instead.

    @staticmethod
    def _finding_ids_for_resources(**lookup):
        return ResourceFindingMapping.objects.filter(
            resource__in=Resource.objects.filter(**lookup).values("id")
        ).values("finding_id")

    def filter_resource_uid(self, queryset, name, value):
        return queryset.filter(id__in=self._finding_ids_for_resources(uid=value))

    def filter_resource_uid_in(self, queryset, name, value):
        return queryset.filter(id__in=self._finding_ids_for_resources(uid__in=value))

    def filter_resource_uid_icontains(self, queryset, name, value):
        return queryset.filter(
            id__in=self._finding_ids_for_resources(uid__icontains=value)
        )

    def filter_resource_name(self, queryset, name, value):
        return queryset.filter(id__in=self._finding_ids_for_resources(name=value))

    def filter_resource_name_in(self, queryset, name, value):
        return queryset.filter(id__in=self._finding_ids_for_resources(name__in=value))

    def filter_resource_name_icontains(self, queryset, name, value):
        return queryset.filter(
            id__in=self._finding_ids_for_resources(name__icontains=value)
        )

class TenantFilter(FilterSet):
|
||||
inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
@@ -803,11 +847,15 @@ class FindingGroupFilter(CommonFindingFilters):
|
||||
check_id = CharFilter(field_name="check_id", lookup_expr="exact")
|
||||
check_id__in = CharInFilter(field_name="check_id", lookup_expr="in")
|
||||
check_id__icontains = CharFilter(field_name="check_id", lookup_expr="icontains")
|
||||
check_title__icontains = CharFilter(method="filter_check_title_icontains")
|
||||
scan = UUIDFilter(field_name="scan_id", lookup_expr="exact")
|
||||
scan__in = UUIDInFilter(field_name="scan_id", lookup_expr="in")
|
||||
|
||||
class Meta:
|
||||
model = Finding
|
||||
fields = {
|
||||
"check_id": ["exact", "in", "icontains"],
|
||||
"scan": ["exact", "in"],
|
||||
}
|
||||
|
||||
def filter_queryset(self, queryset):
@@ -895,15 +943,31 @@ class LatestFindingGroupFilter(CommonFindingFilters):
|
||||
check_id = CharFilter(field_name="check_id", lookup_expr="exact")
|
||||
check_id__in = CharInFilter(field_name="check_id", lookup_expr="in")
|
||||
check_id__icontains = CharFilter(field_name="check_id", lookup_expr="icontains")
|
||||
check_title__icontains = CharFilter(method="filter_check_title_icontains")
|
||||
scan = UUIDFilter(field_name="scan_id", lookup_expr="exact")
|
||||
scan__in = UUIDInFilter(field_name="scan_id", lookup_expr="in")
|
||||
|
||||
class Meta:
|
||||
model = Finding
|
||||
fields = {
|
||||
"check_id": ["exact", "in", "icontains"],
|
||||
"scan": ["exact", "in"],
|
||||
}
|
||||
|
||||
|
||||
class FindingGroupSummaryFilter(FilterSet):
|
||||
class _CheckTitleToCheckIdMixin:
|
||||
"""Resolve check_title search to check_ids so all provider rows are kept."""
|
||||
|
||||
def filter_check_title_to_check_ids(self, queryset, name, value):
|
||||
matching_check_ids = (
|
||||
queryset.filter(check_title__icontains=value)
|
||||
.values_list("check_id", flat=True)
|
||||
.distinct()
|
||||
)
|
||||
return queryset.filter(check_id__in=matching_check_ids)
|
||||
|
||||
|
||||
class FindingGroupSummaryFilter(_CheckTitleToCheckIdMixin, FilterSet):
|
||||
"""
|
||||
Filter for FindingGroupDailySummary queries.
@@ -926,9 +990,7 @@ class FindingGroupSummaryFilter(FilterSet):
|
||||
check_id = CharFilter(field_name="check_id", lookup_expr="exact")
|
||||
check_id__in = CharInFilter(field_name="check_id", lookup_expr="in")
|
||||
check_id__icontains = CharFilter(field_name="check_id", lookup_expr="icontains")
|
||||
check_title__icontains = CharFilter(
|
||||
field_name="check_title", lookup_expr="icontains"
|
||||
)
|
||||
check_title__icontains = CharFilter(method="filter_check_title_to_check_ids")
|
||||
|
||||
# Provider filters
|
||||
provider_id = UUIDFilter(field_name="provider_id", lookup_expr="exact")
@@ -1016,7 +1078,7 @@ class FindingGroupSummaryFilter(FilterSet):
|
||||
return dt
|
||||
|
||||
|
||||
class LatestFindingGroupSummaryFilter(FilterSet):
|
||||
class LatestFindingGroupSummaryFilter(_CheckTitleToCheckIdMixin, FilterSet):
|
||||
"""
|
||||
Filter for FindingGroupDailySummary /latest endpoint.
@@ -1028,9 +1090,7 @@ class LatestFindingGroupSummaryFilter(FilterSet):
|
||||
check_id = CharFilter(field_name="check_id", lookup_expr="exact")
|
||||
check_id__in = CharInFilter(field_name="check_id", lookup_expr="in")
|
||||
check_id__icontains = CharFilter(field_name="check_id", lookup_expr="icontains")
|
||||
check_title__icontains = CharFilter(
|
||||
field_name="check_title", lookup_expr="icontains"
|
||||
)
|
||||
check_title__icontains = CharFilter(method="filter_check_title_to_check_ids")
|
||||
|
||||
# Provider filters
|
||||
provider_id = UUIDFilter(field_name="provider_id", lookup_expr="exact")
@@ -1048,6 +1108,98 @@ class LatestFindingGroupSummaryFilter(FilterSet):
|
||||
}
|
||||
|
||||
|
||||
class FindingGroupAggregatedComputedFilter(FilterSet):
|
||||
"""Filter aggregated finding-group rows by computed status/severity/muted."""
|
||||
|
||||
STATUS_CHOICES = (
|
||||
("FAIL", "Fail"),
|
||||
("PASS", "Pass"),
|
||||
("MUTED", "Muted"),
|
||||
)
|
||||
|
||||
status = ChoiceFilter(method="filter_status", choices=STATUS_CHOICES)
|
||||
status__in = CharInFilter(method="filter_status_in", lookup_expr="in")
|
||||
severity = ChoiceFilter(method="filter_severity", choices=SeverityChoices)
|
||||
severity__in = CharInFilter(method="filter_severity_in", lookup_expr="in")
|
||||
include_muted = BooleanFilter(method="filter_include_muted")
|
||||
|
||||
def filter_status(self, queryset, name, value):
|
||||
return queryset.filter(aggregated_status=value)
|
||||
|
||||
def filter_status_in(self, queryset, name, value):
|
||||
values = value
|
||||
if isinstance(value, str):
|
||||
values = [part.strip() for part in value.split(",") if part.strip()]
|
||||
|
||||
allowed = {choice[0] for choice in self.STATUS_CHOICES}
|
||||
invalid = [
|
||||
status_value for status_value in values if status_value not in allowed
|
||||
]
|
||||
if invalid:
|
||||
raise ValidationError(
|
||||
[
|
||||
{
|
||||
"detail": f"invalid status filter: {invalid[0]}",
|
||||
"status": "400",
|
||||
"source": {"pointer": "/data"},
|
||||
"code": "invalid",
|
||||
}
|
||||
]
|
||||
)
|
||||
|
||||
if not values:
|
||||
return queryset
|
||||
|
||||
return queryset.filter(aggregated_status__in=values)
|
||||
|
||||
def filter_severity(self, queryset, name, value):
|
||||
severity_order = SEVERITY_ORDER.get(value)
|
||||
if severity_order is None:
|
||||
raise ValidationError(
|
||||
[
|
||||
{
|
||||
"detail": f"invalid severity filter: {value}",
|
||||
"status": "400",
|
||||
"source": {"pointer": "/data"},
|
||||
"code": "invalid",
|
||||
}
|
||||
]
|
||||
)
|
||||
return queryset.filter(severity_order=severity_order)
|
||||
|
||||
def filter_severity_in(self, queryset, name, value):
|
||||
values = value
|
||||
if isinstance(value, str):
|
||||
values = [part.strip() for part in value.split(",") if part.strip()]
|
||||
|
||||
orders = []
|
||||
for severity_value in values:
|
||||
severity_order = SEVERITY_ORDER.get(severity_value)
|
||||
if severity_order is None:
|
||||
raise ValidationError(
|
||||
[
|
||||
{
|
||||
"detail": f"invalid severity filter: {severity_value}",
|
||||
"status": "400",
|
||||
"source": {"pointer": "/data"},
|
||||
"code": "invalid",
|
||||
}
|
||||
]
|
||||
)
|
||||
orders.append(severity_order)
|
||||
|
||||
if not orders:
|
||||
return queryset
|
||||
|
||||
return queryset.filter(severity_order__in=orders)
|
||||
|
||||
def filter_include_muted(self, queryset, name, value):
|
||||
if value is True:
|
||||
return queryset
|
||||
# include_muted=false: exclude fully-muted groups
|
||||
return queryset.exclude(fail_count=0, pass_count=0, muted_count__gt=0)
|
||||
|
||||
|
||||
class ProviderSecretFilter(FilterSet):
|
||||
inserted_at = DateFilter(
|
||||
field_name="inserted_at",
@@ -0,0 +1,49 @@
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
TASK_NAME = "attack-paths-cleanup-stale-scans"
|
||||
INTERVAL_HOURS = 1
|
||||
|
||||
|
||||
def create_periodic_task(apps, schema_editor):
|
||||
IntervalSchedule = apps.get_model("django_celery_beat", "IntervalSchedule")
|
||||
PeriodicTask = apps.get_model("django_celery_beat", "PeriodicTask")
|
||||
|
||||
schedule, _ = IntervalSchedule.objects.get_or_create(
|
||||
every=INTERVAL_HOURS,
|
||||
period="hours",
|
||||
)
|
||||
|
||||
PeriodicTask.objects.update_or_create(
|
||||
name=TASK_NAME,
|
||||
defaults={
|
||||
"task": TASK_NAME,
|
||||
"interval": schedule,
|
||||
"enabled": True,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def delete_periodic_task(apps, schema_editor):
|
||||
IntervalSchedule = apps.get_model("django_celery_beat", "IntervalSchedule")
|
||||
PeriodicTask = apps.get_model("django_celery_beat", "PeriodicTask")
|
||||
|
||||
PeriodicTask.objects.filter(name=TASK_NAME).delete()
|
||||
|
||||
# Clean up the schedule if no other task references it
|
||||
IntervalSchedule.objects.filter(
|
||||
every=INTERVAL_HOURS,
|
||||
period="hours",
|
||||
periodictask__isnull=True,
|
||||
).delete()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0085_finding_group_daily_summary_trgm_indexes"),
|
||||
("django_celery_beat", "0019_alter_periodictasks_options"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(create_periodic_task, delete_periodic_task),
|
||||
]
@@ -30,7 +30,10 @@ class HasPermissions(BasePermission):
|
||||
return True
|
||||
|
||||
user_roles = (
|
||||
User.objects.using(MainRouter.admin_db).get(id=request.user.id).roles.all()
|
||||
User.objects.using(MainRouter.admin_db)
|
||||
.get(id=request.user.id)
|
||||
.roles.using(MainRouter.admin_db)
|
||||
.all()
|
||||
)
|
||||
if not user_roles:
|
||||
return False
@@ -61,7 +61,7 @@ def revoke_membership_api_keys(sender, instance, **kwargs): # noqa: F841
|
||||
in that tenant should be revoked to prevent further access.
|
||||
"""
|
||||
TenantAPIKey.objects.filter(
|
||||
entity=instance.user, tenant_id=instance.tenant.id
|
||||
entity_id=instance.user_id, tenant_id=instance.tenant_id
|
||||
).update(revoked=True)
@@ -1,7 +1,7 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Prowler API
|
||||
version: 1.23.0
|
||||
version: 1.24.0
|
||||
description: |-
|
||||
Prowler API specification.
@@ -11,7 +11,7 @@ from api.attack_paths import database as graph_database
|
||||
from api.attack_paths import views_helpers
|
||||
from tasks.jobs.attack_paths.config import (
|
||||
PROVIDER_ELEMENT_ID_PROPERTY,
|
||||
PROVIDER_ID_PROPERTY,
|
||||
get_provider_label,
|
||||
)
@@ -53,7 +53,7 @@ def test_prepare_parameters_includes_provider_and_casts(
|
||||
)
|
||||
|
||||
assert result["provider_uid"] == "123456789012"
|
||||
assert result["provider_id"] == "test-provider-id"
|
||||
assert "provider_id" not in result
|
||||
assert result["limit"] == 5
@@ -107,12 +107,12 @@ def test_execute_query_serializes_graph(
|
||||
parameters = {"provider_uid": "123"}
|
||||
|
||||
provider_id = "test-provider-123"
|
||||
plabel = get_provider_label(provider_id)
|
||||
node = attack_paths_graph_stub_classes.Node(
|
||||
element_id="node-1",
|
||||
labels=["AWSAccount"],
|
||||
labels=["AWSAccount", plabel],
|
||||
properties={
|
||||
"name": "account",
|
||||
PROVIDER_ID_PROPERTY: provider_id,
|
||||
"complex": {
|
||||
"items": [
|
||||
attack_paths_graph_stub_classes.NativeValue("value"),
@@ -121,15 +121,13 @@ def test_execute_query_serializes_graph(
|
||||
},
|
||||
},
|
||||
)
|
||||
node_2 = attack_paths_graph_stub_classes.Node(
|
||||
"node-2", ["RDSInstance"], {PROVIDER_ID_PROPERTY: provider_id}
|
||||
)
|
||||
node_2 = attack_paths_graph_stub_classes.Node("node-2", ["RDSInstance", plabel], {})
|
||||
relationship = attack_paths_graph_stub_classes.Relationship(
|
||||
element_id="rel-1",
|
||||
rel_type="OWNS",
|
||||
start_node=node,
|
||||
end_node=node_2,
|
||||
properties={"weight": 1, PROVIDER_ID_PROPERTY: provider_id},
|
||||
properties={"weight": 1},
|
||||
)
|
||||
graph = SimpleNamespace(nodes=[node, node_2], relationships=[relationship])
@@ -213,29 +211,27 @@ def test_execute_query_raises_permission_denied_on_read_only(
|
||||
)
|
||||
|
||||
|
||||
def test_serialize_graph_filters_by_provider_id(attack_paths_graph_stub_classes):
|
||||
def test_serialize_graph_filters_by_provider_label(attack_paths_graph_stub_classes):
|
||||
provider_id = "provider-keep"
|
||||
plabel = get_provider_label(provider_id)
|
||||
other_label = get_provider_label("provider-other")
|
||||
|
||||
node_keep = attack_paths_graph_stub_classes.Node(
|
||||
"n1", ["AWSAccount"], {PROVIDER_ID_PROPERTY: provider_id}
|
||||
)
|
||||
node_keep = attack_paths_graph_stub_classes.Node("n1", ["AWSAccount", plabel], {})
|
||||
node_drop = attack_paths_graph_stub_classes.Node(
|
||||
"n2", ["AWSAccount"], {PROVIDER_ID_PROPERTY: "provider-other"}
|
||||
"n2", ["AWSAccount", other_label], {}
|
||||
)
|
||||
|
||||
rel_keep = attack_paths_graph_stub_classes.Relationship(
|
||||
"r1", "OWNS", node_keep, node_keep, {PROVIDER_ID_PROPERTY: provider_id}
|
||||
)
|
||||
rel_drop_by_provider = attack_paths_graph_stub_classes.Relationship(
|
||||
"r2", "OWNS", node_keep, node_drop, {PROVIDER_ID_PROPERTY: "provider-other"}
|
||||
"r1", "OWNS", node_keep, node_keep, {}
|
||||
)
|
||||
# Relationship connecting a kept node to a dropped node — filtered by endpoint check
|
||||
rel_drop_orphaned = attack_paths_graph_stub_classes.Relationship(
|
||||
"r3", "OWNS", node_keep, node_drop, {PROVIDER_ID_PROPERTY: provider_id}
|
||||
"r2", "OWNS", node_keep, node_drop, {}
|
||||
)
|
||||
|
||||
graph = SimpleNamespace(
|
||||
nodes=[node_keep, node_drop],
|
||||
relationships=[rel_keep, rel_drop_by_provider, rel_drop_orphaned],
|
||||
relationships=[rel_keep, rel_drop_orphaned],
|
||||
)
|
||||
|
||||
result = views_helpers._serialize_graph(graph, provider_id)
@@ -354,7 +350,6 @@ def test_serialize_properties_filters_internal_fields():
|
||||
"_module_name": "cartography:aws",
|
||||
"_module_version": "0.98.0",
|
||||
# Provider isolation
|
||||
PROVIDER_ID_PROPERTY: "42",
|
||||
PROVIDER_ELEMENT_ID_PROPERTY: "42:abc123",
|
||||
}
@@ -449,14 +444,11 @@ def test_execute_custom_query_serializes_graph(
|
||||
attack_paths_graph_stub_classes,
|
||||
):
|
||||
provider_id = "test-provider-123"
|
||||
node_1 = attack_paths_graph_stub_classes.Node(
|
||||
"node-1", ["AWSAccount"], {PROVIDER_ID_PROPERTY: provider_id}
|
||||
)
|
||||
node_2 = attack_paths_graph_stub_classes.Node(
|
||||
"node-2", ["RDSInstance"], {PROVIDER_ID_PROPERTY: provider_id}
|
||||
)
|
||||
plabel = get_provider_label(provider_id)
|
||||
node_1 = attack_paths_graph_stub_classes.Node("node-1", ["AWSAccount", plabel], {})
|
||||
node_2 = attack_paths_graph_stub_classes.Node("node-2", ["RDSInstance", plabel], {})
|
||||
relationship = attack_paths_graph_stub_classes.Relationship(
|
||||
"rel-1", "OWNS", node_1, node_2, {PROVIDER_ID_PROPERTY: provider_id}
|
||||
"rel-1", "OWNS", node_1, node_2, {}
|
||||
)
|
||||
|
||||
graph_result = MagicMock()
@@ -471,10 +463,11 @@ def test_execute_custom_query_serializes_graph(
|
||||
"db-tenant-test", "MATCH (n) RETURN n", provider_id
|
||||
)
|
||||
|
||||
mock_execute.assert_called_once_with(
|
||||
database="db-tenant-test",
|
||||
cypher="MATCH (n) RETURN n",
|
||||
)
|
||||
mock_execute.assert_called_once()
|
||||
call_kwargs = mock_execute.call_args[1]
|
||||
assert call_kwargs["database"] == "db-tenant-test"
|
||||
# The cypher is rewritten with the provider label injection
|
||||
assert plabel in call_kwargs["cypher"]
|
||||
assert len(result["nodes"]) == 2
|
||||
assert result["relationships"][0]["label"] == "OWNS"
|
||||
assert result["truncated"] is False
@@ -511,72 +504,6 @@ def test_execute_custom_query_wraps_graph_errors():
|
||||
mock_logger.error.assert_called_once()
|
||||
|
||||
|
||||
# -- validate_custom_query ------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"cypher",
|
||||
[
|
||||
"LOAD CSV FROM 'http://169.254.169.254/' AS x RETURN x",
|
||||
"load csv from 'http://evil.com' as row return row",
|
||||
"CALL apoc.load.json('http://evil.com/') YIELD value RETURN value",
|
||||
"CALL apoc.load.csvParams('http://evil.com/', {}, null) YIELD list RETURN list",
|
||||
"CALL apoc.import.csv([{fileName: 'f'}], [], {}) YIELD node RETURN node",
|
||||
"CALL apoc.export.csv.all('file.csv', {})",
|
||||
"CALL apoc.cypher.run('CREATE (n)', {}) YIELD value RETURN value",
|
||||
"CALL apoc.systemdb.graph() YIELD nodes RETURN nodes",
|
||||
"CALL apoc.config.list() YIELD key, value RETURN key, value",
|
||||
"CALL apoc.periodic.iterate('MATCH (n) RETURN n', 'DELETE n', {batchSize: 100})",
|
||||
"CALL apoc.do.when(true, 'CREATE (n) RETURN n', '', {}) YIELD value RETURN value",
|
||||
"CALL apoc.trigger.add('t', 'RETURN 1', {phase: 'before'})",
|
||||
"CALL apoc.custom.asProcedure('myProc', 'RETURN 1')",
|
||||
],
|
||||
ids=[
|
||||
"LOAD_CSV",
|
||||
"LOAD_CSV_lowercase",
|
||||
"apoc.load.json",
|
||||
"apoc.load.csvParams",
|
||||
"apoc.import.csv",
|
||||
"apoc.export.csv",
|
||||
"apoc.cypher.run",
|
||||
"apoc.systemdb.graph",
|
||||
"apoc.config.list",
|
||||
"apoc.periodic.iterate",
|
||||
"apoc.do.when",
|
||||
"apoc.trigger.add",
|
||||
"apoc.custom.asProcedure",
|
||||
],
|
||||
)
|
||||
def test_validate_custom_query_rejects_blocked_patterns(cypher):
|
||||
with pytest.raises(ValidationError) as exc:
|
||||
views_helpers.validate_custom_query(cypher)
|
||||
|
||||
assert "blocked operation" in str(exc.value.detail)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"cypher",
|
||||
[
|
||||
"MATCH (n:AWSAccount) RETURN n LIMIT 10",
|
||||
"MATCH (a)-[r]->(b) RETURN a, r, b",
|
||||
"MATCH (n) WHERE n.name CONTAINS 'load' RETURN n",
|
||||
"CALL apoc.create.vNode(['Label'], {}) YIELD node RETURN node",
|
||||
"MATCH (n) WHERE n.name = 'apoc.load.json' RETURN n",
|
||||
'MATCH (n) WHERE n.description = "LOAD CSV is cool" RETURN n',
|
||||
],
|
||||
ids=[
|
||||
"simple_match",
|
||||
"traversal",
|
||||
"contains_load_substring",
|
||||
"apoc_virtual_node",
|
||||
"apoc_load_inside_single_quotes",
|
||||
"load_csv_inside_double_quotes",
|
||||
],
|
||||
)
|
||||
def test_validate_custom_query_allows_clean_queries(cypher):
|
||||
views_helpers.validate_custom_query(cypher)
|
||||
|
||||
|
||||
# -- _truncate_graph ----------------------------------------------------------
@@ -0,0 +1,43 @@
|
||||
import pytest
|
||||
from config.settings.celery import _build_celery_broker_url
|
||||
|
||||
|
||||
class TestBuildCeleryBrokerUrl:
|
||||
def test_without_credentials(self):
|
||||
broker_url = _build_celery_broker_url("redis", "", "", "valkey", "6379", "0")
|
||||
|
||||
assert broker_url == "redis://valkey:6379/0"
|
||||
|
||||
def test_with_password_only(self):
|
||||
broker_url = _build_celery_broker_url(
|
||||
"rediss", "", "secret", "cache.example.com", "6379", "0"
|
||||
)
|
||||
|
||||
assert broker_url == "rediss://:secret@cache.example.com:6379/0"
|
||||
|
||||
def test_with_username_and_password(self):
|
||||
broker_url = _build_celery_broker_url(
|
||||
"rediss", "default", "secret", "cache.example.com", "6379", "0"
|
||||
)
|
||||
|
||||
assert broker_url == "rediss://default:secret@cache.example.com:6379/0"
|
||||
|
||||
def test_with_username_only(self):
|
||||
broker_url = _build_celery_broker_url(
|
||||
"redis", "admin", "", "valkey", "6379", "0"
|
||||
)
|
||||
|
||||
assert broker_url == "redis://admin@valkey:6379/0"
|
||||
|
||||
def test_url_encodes_credentials(self):
|
||||
broker_url = _build_celery_broker_url(
|
||||
"rediss", "user@name", "p@ss:word", "cache.example.com", "6379", "0"
|
||||
)
|
||||
|
||||
assert (
|
||||
broker_url == "rediss://user%40name:p%40ss%3Aword@cache.example.com:6379/0"
|
||||
)
|
||||
|
||||
def test_invalid_scheme_raises_error(self):
|
||||
with pytest.raises(ValueError, match="Invalid VALKEY_SCHEME 'http'"):
|
||||
_build_celery_broker_url("http", "", "", "valkey", "6379", "0")
@@ -0,0 +1,429 @@
|
||||
"""Unit tests for the Cypher sanitizer (validation + provider-label injection)."""
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from rest_framework.exceptions import ValidationError
|
||||
|
||||
from api.attack_paths.cypher_sanitizer import (
|
||||
inject_provider_label,
|
||||
validate_custom_query,
|
||||
)
|
||||
|
||||
PROVIDER_ID = "019c41ee-7df3-7dec-a684-d839f95619f8"
|
||||
LABEL = "_Provider_019c41ee7df37deca684d839f95619f8"
|
||||
|
||||
|
||||
def _inject(cypher: str) -> str:
|
||||
"""Shortcut that patches `get_provider_label` to avoid config imports."""
|
||||
with patch(
|
||||
"api.attack_paths.cypher_sanitizer.get_provider_label", return_value=LABEL
|
||||
):
|
||||
return inject_provider_label(cypher, PROVIDER_ID)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pass A - Labeled node patterns (all clauses)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestLabeledNodes:
|
||||
def test_single_label(self):
|
||||
result = _inject("MATCH (n:AWSRole) RETURN n")
|
||||
assert f"(n:AWSRole:{LABEL})" in result
|
||||
|
||||
def test_label_with_properties(self):
|
||||
result = _inject("MATCH (n:AWSRole {name: 'admin'}) RETURN n")
|
||||
assert f"(n:AWSRole:{LABEL} {{name: 'admin'}})" in result
|
||||
|
||||
def test_multiple_labels(self):
|
||||
result = _inject("MATCH (n:AWSRole:AWSPrincipal) RETURN n")
|
||||
assert f"(n:AWSRole:AWSPrincipal:{LABEL})" in result
|
||||
|
||||
def test_anonymous_labeled(self):
|
||||
result = _inject(
|
||||
"MATCH (:AWSPrincipal {arn: 'ecs-tasks.amazonaws.com'}) RETURN 1"
|
||||
)
|
||||
assert f"(:AWSPrincipal:{LABEL} {{arn: 'ecs-tasks.amazonaws.com'}})" in result
|
||||
|
||||
def test_backtick_label(self):
|
||||
result = _inject("MATCH (n:`My Label`) RETURN n")
|
||||
assert f"(n:`My Label`:{LABEL})" in result
|
||||
|
||||
def test_labeled_in_where_clause(self):
|
||||
"""Labeled nodes in WHERE (pattern existence) still get the label."""
|
||||
result = _inject(
|
||||
"MATCH (n:AWSRole) WHERE EXISTS((n)-[:REL]->(:Target)) RETURN n"
|
||||
)
|
||||
assert f"(n:AWSRole:{LABEL})" in result
|
||||
assert f"(:Target:{LABEL})" in result
|
||||
|
||||
def test_labeled_in_return_clause(self):
|
||||
"""Labeled nodes in RETURN still get the label (they're always node patterns)."""
|
||||
result = _inject("MATCH (n:AWSRole) RETURN (n:AWSRole)")
|
||||
assert result.count(f":AWSRole:{LABEL}") == 2
|
||||
|
||||
def test_labeled_in_optional_match(self):
|
||||
result = _inject(
|
||||
"OPTIONAL MATCH (pf:ProwlerFinding {status: 'FAIL'}) RETURN pf"
|
||||
)
|
||||
assert f"(pf:ProwlerFinding:{LABEL} {{status: 'FAIL'}})" in result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pass B - Bare node patterns (MATCH/OPTIONAL MATCH only)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBareNodes:
|
||||
def test_bare_in_match(self):
|
||||
result = _inject("MATCH (a)-[:HAS_POLICY]->(b) RETURN a, b")
|
||||
assert f"(a:{LABEL})" in result
|
||||
assert f"(b:{LABEL})" in result
|
||||
|
||||
def test_bare_with_properties_in_match(self):
|
||||
result = _inject("MATCH (n {name: 'x'}) RETURN n")
|
||||
assert f"(n:{LABEL} {{name: 'x'}})" in result
|
||||
|
||||
def test_bare_in_optional_match(self):
|
||||
result = _inject("OPTIONAL MATCH (n)-[r]-(m) RETURN n")
|
||||
assert f"(n:{LABEL})" in result
|
||||
assert f"(m:{LABEL})" in result
|
||||
|
||||
def test_bare_not_injected_in_return(self):
|
||||
"""Bare (identifier) in RETURN could be expression grouping."""
|
||||
cypher = "MATCH (n:AWSRole) RETURN (n)"
|
||||
result = _inject(cypher)
|
||||
# The labeled (n:AWSRole) gets the label, but the bare (n) in RETURN should not
|
||||
assert f"(n:AWSRole:{LABEL})" in result
|
||||
# Count how many times the label appears - should be 1 (from MATCH only)
|
||||
assert result.count(LABEL) == 1
|
||||
|
||||
def test_bare_not_injected_in_where(self):
|
||||
cypher = "MATCH (n:AWSRole) WHERE (n.x > 1) RETURN n"
|
||||
result = _inject(cypher)
|
||||
# (n.x > 1) is an expression group, not a node pattern - should be untouched
|
||||
assert "(n.x > 1)" in result
|
||||
|
||||
def test_bare_not_injected_in_with(self):
|
||||
cypher = "MATCH (n:AWSRole) WITH (n) RETURN n"
|
||||
result = _inject(cypher)
|
||||
assert result.count(LABEL) == 1
|
||||
|
||||
def test_bare_not_injected_in_unwind(self):
|
||||
cypher = "UNWIND nodes(path) as n OPTIONAL MATCH (n)-[r]-(m) RETURN n"
|
||||
result = _inject(cypher)
|
||||
# (n) and (m) in OPTIONAL MATCH get injected, but nodes(path) in UNWIND does not
|
||||
assert f"(n:{LABEL})" in result
|
||||
assert f"(m:{LABEL})" in result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Function call exclusion
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestFunctionCallExclusion:
|
||||
@pytest.mark.parametrize(
|
||||
"func_call",
|
||||
[
|
||||
"collect(DISTINCT pf)",
|
||||
"any(x IN stmt.action WHERE toLower(x) = 'iam:*')",
|
||||
"toLower(action)",
|
||||
"nodes(path)",
|
||||
"count(n)",
|
||||
"apoc.create.vNode(labels)",
|
||||
"EXISTS(n.prop)",
|
||||
"size(n.list)",
|
||||
],
|
||||
)
|
||||
def test_function_calls_not_injected(self, func_call):
|
||||
cypher = f"MATCH (n:AWSRole) WHERE {func_call} RETURN n"
|
||||
result = _inject(cypher)
|
||||
# The function call should remain unchanged
|
||||
assert func_call in result
|
||||
# Only the MATCH labeled node should get the label
|
||||
assert result.count(LABEL) == 1
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# String and comment protection
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestProtection:
|
||||
def test_string_with_fake_node_pattern(self):
|
||||
cypher = "MATCH (n:AWSRole) WHERE n.name = '(fake:Label)' RETURN n"
|
||||
result = _inject(cypher)
|
||||
assert "'(fake:Label)'" in result
|
||||
assert result.count(LABEL) == 1
|
||||
|
||||
def test_double_quoted_string(self):
|
||||
cypher = 'MATCH (n:AWSRole) WHERE n.name = "(fake:Label)" RETURN n'
|
||||
result = _inject(cypher)
|
||||
assert '"(fake:Label)"' in result
|
||||
assert result.count(LABEL) == 1
|
||||
|
||||
def test_line_comment_with_node_pattern(self):
|
||||
cypher = "// (n:Fake)\nMATCH (n:AWSRole) RETURN n"
|
||||
result = _inject(cypher)
|
||||
assert "// (n:Fake)" in result
|
||||
assert result.count(LABEL) == 1
|
||||
|
||||
def test_string_containing_double_slash(self):
|
||||
"""Strings with // inside should be consumed as strings, not comments."""
|
||||
cypher = "MATCH (n:AWSRole {url: 'https://example.com'}) RETURN n"
|
||||
result = _inject(cypher)
|
||||
assert "'https://example.com'" in result
|
||||
assert f"(n:AWSRole:{LABEL}" in result
|
||||
|
||||
def test_escaped_quotes_in_string(self):
|
||||
cypher = r"MATCH (n:AWSRole) WHERE n.name = 'it\'s a test' RETURN n"
|
||||
result = _inject(cypher)
|
||||
assert result.count(LABEL) == 1
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Clause splitting
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestClauseSplitting:
|
||||
def test_case_insensitive_keywords(self):
|
||||
cypher = "match (n:AWSRole) where n.x = 1 return n"
|
||||
result = _inject(cypher)
|
||||
assert f"(n:AWSRole:{LABEL})" in result
|
||||
|
||||
def test_optional_match_with_extra_whitespace(self):
|
||||
cypher = "OPTIONAL MATCH (n:AWSRole) RETURN n"
|
||||
result = _inject(cypher)
|
||||
assert f"(n:AWSRole:{LABEL})" in result
|
||||
|
||||
def test_multiple_match_clauses(self):
|
||||
cypher = (
|
||||
"MATCH (a:AWSAccount)--(b:AWSRole) "
|
||||
"MATCH (b)--(c:AWSPolicy) "
|
||||
"RETURN a, b, c"
|
||||
)
|
||||
result = _inject(cypher)
|
||||
assert f"(a:AWSAccount:{LABEL})" in result
|
||||
assert f"(b:AWSRole:{LABEL})" in result
|
||||
assert f"(c:AWSPolicy:{LABEL})" in result
|
||||
# (b) in second MATCH is bare and gets injected
|
||||
assert result.count(LABEL) == 4 # a, b (labeled), b (bare in 2nd MATCH), c
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Real-world query patterns from aws.py
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRealWorldQueries:
|
||||
def test_basic_resource_query(self):
|
||||
cypher = (
|
||||
"MATCH path = (aws:AWSAccount {id: $provider_uid})--(rds:RDSInstance)\n"
|
||||
"UNWIND nodes(path) as n\n"
|
||||
"OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding {status: 'FAIL'})\n"
|
||||
"RETURN path, collect(DISTINCT pf) as dpf"
|
||||
)
|
||||
result = _inject(cypher)
|
||||
assert f"(aws:AWSAccount:{LABEL} {{id: $provider_uid}})" in result
|
||||
assert f"(rds:RDSInstance:{LABEL})" in result
|
||||
assert f"(n:{LABEL})" in result
|
||||
assert f"(pf:ProwlerFinding:{LABEL} {{status: 'FAIL'}})" in result
|
||||
assert "nodes(path)" in result # function call untouched
|
||||
assert "collect(DISTINCT pf)" in result # function call untouched
|
||||
|
||||
def test_privilege_escalation_query(self):
|
||||
cypher = (
|
||||
"MATCH path_principal = (aws:AWSAccount {id: $uid})"
|
||||
"--(principal:AWSPrincipal)--(pol:AWSPolicy)\n"
|
||||
"WHERE pol.effect = 'Allow'\n"
|
||||
"MATCH (principal)--(cfn_policy:AWSPolicy)"
|
||||
"--(stmt_cfn:AWSPolicyStatement)\n"
|
||||
"WHERE any(action IN stmt_cfn.action WHERE toLower(action) = 'iam:passrole')\n"
|
||||
"MATCH path_target = (aws)--(target_role:AWSRole)"
|
||||
"-[:TRUSTS_AWS_PRINCIPAL]->(:AWSPrincipal {arn: 'cloudformation.amazonaws.com'})\n"
|
||||
"RETURN path_principal, path_target"
|
||||
)
|
||||
result = _inject(cypher)
|
||||
assert f"(aws:AWSAccount:{LABEL} {{id: $uid}})" in result
|
||||
assert f"(principal:AWSPrincipal:{LABEL})" in result
|
||||
assert f"(pol:AWSPolicy:{LABEL})" in result
|
||||
assert f"(principal:{LABEL})" in result # bare in 2nd MATCH
|
||||
assert f"(cfn_policy:AWSPolicy:{LABEL})" in result
|
||||
assert f"(stmt_cfn:AWSPolicyStatement:{LABEL})" in result
|
||||
assert f"(aws:{LABEL})" in result # bare in 3rd MATCH
|
||||
assert f"(target_role:AWSRole:{LABEL})" in result
|
||||
assert (
|
||||
f"(:AWSPrincipal:{LABEL} {{arn: 'cloudformation.amazonaws.com'}})" in result
|
||||
)
|
||||
# Function calls in WHERE untouched
|
||||
assert "any(action IN" in result
|
||||
assert "toLower(action)" in result
|
||||
|
||||
def test_custom_bare_query(self):
|
||||
cypher = (
|
||||
"MATCH (a)-[:HAS_POLICY]->(b)\n"
|
||||
"WHERE a.name CONTAINS 'admin'\n"
|
||||
"RETURN a, b"
|
||||
)
|
||||
result = _inject(cypher)
|
||||
assert f"(a:{LABEL})" in result
|
||||
assert f"(b:{LABEL})" in result
|
||||
assert result.count(LABEL) == 2
|
||||
|
||||
def test_internet_via_path_connectivity(self):
|
||||
"""Post-refactor pattern: Internet reached via CAN_ACCESS, not standalone."""
|
||||
cypher = (
|
||||
"MATCH path = (aws:AWSAccount {id: $provider_uid})--(ec2:EC2Instance)\n"
|
||||
"WHERE ec2.exposed_internet = true\n"
|
||||
"OPTIONAL MATCH (internet:Internet)-[can_access:CAN_ACCESS]->(ec2)\n"
|
||||
"RETURN path, internet, can_access"
|
||||
)
|
||||
result = _inject(cypher)
|
||||
assert f"(aws:AWSAccount:{LABEL}" in result
|
||||
assert f"(ec2:EC2Instance:{LABEL})" in result
|
||||
assert f"(internet:Internet:{LABEL})" in result
|
||||
# ec2 in the OPTIONAL MATCH is bare (it carries no label), so Pass A cannot match it;
# Pass B therefore injects the provider label.
|
||||
assert f"(ec2:{LABEL})" in result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Edge cases
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestEdgeCases:
|
||||
def test_empty_query(self):
|
||||
assert _inject("") == ""
|
||||
|
||||
def test_no_node_patterns(self):
|
||||
cypher = "RETURN 1 + 2"
|
||||
assert _inject(cypher) == cypher
|
||||
|
||||
def test_anonymous_empty_parens_not_injected(self):
|
||||
"""Empty () in MATCH is extremely rare but should not be injected."""
|
||||
cypher = "MATCH ()--(m:AWSRole) RETURN m"
|
||||
result = _inject(cypher)
|
||||
assert "()" in result # empty parens untouched
|
||||
assert f"(m:AWSRole:{LABEL})" in result
|
||||
|
||||
def test_fully_anonymous_query_bypasses_injection(self):
|
||||
"""All-anonymous patterns bypass injection entirely.
|
||||
|
||||
MATCH ()--()--() has no labels and no variables, so neither Pass A
|
||||
(labeled) nor Pass B (bare identifier) can inject the provider label.
|
||||
This is safe because _serialize_graph() (Layer 3) filters every
|
||||
returned node by provider label, dropping cross-provider data before
|
||||
it reaches the user.
|
||||
"""
|
||||
cypher = "MATCH ()--()--() RETURN *"
|
||||
result = _inject(cypher)
|
||||
assert result == cypher # completely unmodified
|
||||
assert LABEL not in result
|
||||
|
||||
def test_relationship_patterns_untouched(self):
|
||||
cypher = "MATCH (a:X)-[r:REL_TYPE {x: 1}]->(b:Y) RETURN a"
|
||||
result = _inject(cypher)
|
||||
assert "[r:REL_TYPE {x: 1}]" in result # relationship untouched
|
||||
assert f"(a:X:{LABEL})" in result
|
||||
assert f"(b:Y:{LABEL})" in result
|
||||
|
||||
def test_call_subquery(self):
|
||||
cypher = (
|
||||
"CALL {\n"
|
||||
" MATCH (inner:AWSRole) RETURN inner\n"
|
||||
"}\n"
|
||||
"MATCH (outer:AWSAccount) RETURN outer, inner"
|
||||
)
|
||||
result = _inject(cypher)
|
||||
assert f"(inner:AWSRole:{LABEL})" in result
|
||||
assert f"(outer:AWSAccount:{LABEL})" in result
|
||||
|
||||
def test_multiple_protected_regions(self):
|
||||
cypher = (
|
||||
"MATCH (n:X {a: 'hello'}) " 'WHERE n.b = "world" ' "// comment\n" "RETURN n"
|
||||
)
|
||||
result = _inject(cypher)
|
||||
assert "'hello'" in result
|
||||
assert '"world"' in result
|
||||
assert "// comment" in result
|
||||
assert f"(n:X:{LABEL}" in result
|
||||
|
||||
def test_idempotent_on_already_injected(self):
|
||||
"""Running injection twice should add the label twice (not ideal, but predictable)."""
|
||||
first = _inject("MATCH (n:AWSRole) RETURN n")
|
||||
second = _inject(first)
|
||||
# The label appears twice (stacked)
|
||||
assert second.count(LABEL) == 2
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Validation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestValidation:
|
||||
@pytest.mark.parametrize(
|
||||
"cypher",
|
||||
[
|
||||
"LOAD CSV FROM 'http://169.254.169.254/' AS x RETURN x",
|
||||
"load csv from 'http://evil.com' as row return row",
|
||||
"CALL apoc.load.json('http://evil.com/') YIELD value RETURN value",
|
||||
"CALL apoc.load.csvParams('http://evil.com/', {}, null) YIELD list RETURN list",
|
||||
"CALL apoc.import.csv([{fileName: 'f'}], [], {}) YIELD node RETURN node",
|
||||
"CALL apoc.export.csv.all('file.csv', {})",
|
||||
"CALL apoc.cypher.run('CREATE (n)', {}) YIELD value RETURN value",
|
||||
"CALL apoc.systemdb.graph() YIELD nodes RETURN nodes",
|
||||
"CALL apoc.config.list() YIELD key, value RETURN key, value",
|
||||
"CALL apoc.periodic.iterate('MATCH (n) RETURN n', 'DELETE n', {batchSize: 100})",
|
||||
"CALL apoc.do.when(true, 'CREATE (n) RETURN n', '', {}) YIELD value RETURN value",
|
||||
"CALL apoc.trigger.add('t', 'RETURN 1', {phase: 'before'})",
|
||||
"CALL apoc.custom.asProcedure('myProc', 'RETURN 1')",
|
||||
],
|
||||
ids=[
|
||||
"LOAD_CSV",
|
||||
"LOAD_CSV_lowercase",
|
||||
"apoc.load.json",
|
||||
"apoc.load.csvParams",
|
||||
"apoc.import.csv",
|
||||
"apoc.export.csv",
|
||||
"apoc.cypher.run",
|
||||
"apoc.systemdb.graph",
|
||||
"apoc.config.list",
|
||||
"apoc.periodic.iterate",
|
||||
"apoc.do.when",
|
||||
"apoc.trigger.add",
|
||||
"apoc.custom.asProcedure",
|
||||
],
|
||||
)
|
||||
def test_rejects_blocked_patterns(self, cypher):
|
||||
with pytest.raises(ValidationError) as exc:
|
||||
validate_custom_query(cypher)
|
||||
|
||||
assert "blocked operation" in str(exc.value.detail)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"cypher",
|
||||
[
|
||||
"MATCH (n:AWSAccount) RETURN n LIMIT 10",
|
||||
"MATCH (a)-[r]->(b) RETURN a, r, b",
|
||||
"MATCH (n) WHERE n.name CONTAINS 'load' RETURN n",
|
||||
"CALL apoc.create.vNode(['Label'], {}) YIELD node RETURN node",
|
||||
"MATCH (n) WHERE n.name = 'apoc.load.json' RETURN n",
|
||||
'MATCH (n) WHERE n.description = "LOAD CSV is cool" RETURN n',
|
||||
],
|
||||
ids=[
|
||||
"simple_match",
|
||||
"traversal",
|
||||
"contains_load_substring",
|
||||
"apoc_virtual_node",
|
||||
"apoc_load_inside_single_quotes",
|
||||
"load_csv_inside_double_quotes",
|
||||
],
|
||||
)
|
||||
def test_allows_clean_queries(self, cypher):
|
||||
validate_custom_query(cypher)
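Taken together, the injection and validation tests describe a two-step pipeline for user-supplied Cypher. A hedged sketch of how a caller could compose the two functions (the helper name below is hypothetical; the real view code may wire this differently):

from api.attack_paths.cypher_sanitizer import inject_provider_label, validate_custom_query

def prepare_custom_query(cypher: str, provider_id: str) -> str:
    # Reject blocked operations (LOAD CSV, dangerous APOC calls, ...) first;
    # validate_custom_query raises ValidationError on a blocked pattern.
    validate_custom_query(cypher)
    # Then scope every node pattern in the query to the tenant's provider label.
    return inject_provider_label(cypher, provider_id)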
@@ -4,14 +4,25 @@ from unittest.mock import MagicMock
|
||||
from config.settings.sentry import before_send
|
||||
|
||||
|
||||
def _make_log_record(msg, level=logging.ERROR, name="test", args=None):
|
||||
"""Build a real LogRecord so getMessage() works like in production."""
|
||||
record = logging.LogRecord(
|
||||
name=name,
|
||||
level=level,
|
||||
pathname="",
|
||||
lineno=0,
|
||||
msg=msg,
|
||||
args=args,
|
||||
exc_info=None,
|
||||
)
|
||||
return record
|
||||
|
||||
|
||||
def test_before_send_ignores_log_with_ignored_exception():
|
||||
"""Test that before_send ignores logs containing ignored exceptions."""
|
||||
log_record = MagicMock()
|
||||
log_record.msg = "Provider kubernetes is not connected"
|
||||
log_record.levelno = logging.ERROR # 40
|
||||
log_record = _make_log_record("Provider kubernetes is not connected")
|
||||
|
||||
hint = {"log_record": log_record}
|
||||
|
||||
event = MagicMock()
|
||||
|
||||
result = before_send(event, hint)
@@ -36,12 +47,9 @@ def test_before_send_ignores_exception_with_ignored_exception():
|
||||
|
||||
def test_before_send_passes_through_non_ignored_log():
|
||||
"""Test that before_send passes through logs that don't contain ignored exceptions."""
|
||||
log_record = MagicMock()
|
||||
log_record.msg = "Some other error message"
|
||||
log_record.levelno = logging.ERROR # 40
|
||||
log_record = _make_log_record("Some other error message")
|
||||
|
||||
hint = {"log_record": log_record}
|
||||
|
||||
event = MagicMock()
|
||||
|
||||
result = before_send(event, hint)
@@ -66,15 +74,53 @@ def test_before_send_passes_through_non_ignored_exception():
|
||||
|
||||
def test_before_send_handles_warning_level():
|
||||
"""Test that before_send handles warning level logs."""
|
||||
log_record = MagicMock()
|
||||
log_record.msg = "Provider kubernetes is not connected"
|
||||
log_record.levelno = logging.WARNING # 30
|
||||
log_record = _make_log_record(
|
||||
"Provider kubernetes is not connected", level=logging.WARNING
|
||||
)
|
||||
|
||||
hint = {"log_record": log_record}
|
||||
|
||||
event = MagicMock()
|
||||
|
||||
result = before_send(event, hint)
|
||||
|
||||
# Assert that the event was dropped (None returned)
|
||||
assert result is None
|
||||
|
||||
|
||||
def test_before_send_ignores_neo4j_defunct_connection():
|
||||
"""Test that before_send drops neo4j.io defunct connection logs.
|
||||
|
||||
The Neo4j driver logs transient connection errors at ERROR level
|
||||
before RetryableSession retries them. These are noise.
|
||||
|
||||
The driver uses %s formatting, so "defunct" is in the args, not
|
||||
in the template. This test mirrors the real LogRecord structure.
|
||||
"""
|
||||
log_record = _make_log_record(
|
||||
msg="[#%04X] _: <CONNECTION> error: %s: %r",
|
||||
name="neo4j.io",
|
||||
args=(
|
||||
0xE5CC,
|
||||
"Failed to read from defunct connection "
|
||||
"IPv4Address(('cloud-neo4j.prowler.com', 7687))",
|
||||
ConnectionResetError(104, "Connection reset by peer"),
|
||||
),
|
||||
)
|
||||
|
||||
hint = {"log_record": log_record}
|
||||
event = MagicMock()
|
||||
|
||||
assert before_send(event, hint) is None
|
||||
|
||||
|
||||
def test_before_send_passes_non_defunct_neo4j_log():
|
||||
"""Test that before_send passes through neo4j.io logs that are not about defunct connections."""
|
||||
log_record = _make_log_record(
|
||||
msg="Some other neo4j transport error",
|
||||
name="neo4j.io",
|
||||
)
|
||||
|
||||
hint = {"log_record": log_record}
|
||||
event = MagicMock()
|
||||
|
||||
assert before_send(event, hint) == event
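For context, a rough sketch of how a before_send hook of this shape plugs into the SDK; the function name, filter logic, and DSN below are placeholders, and the real hook in config/settings/sentry.py checks more conditions than shown here:

import sentry_sdk

def drop_noisy_log_events(event, hint):
    # Returning None drops the event; returning it unchanged keeps it.
    log_record = hint.get("log_record")
    if log_record is not None and "defunct" in log_record.getMessage():
        return None  # transient neo4j.io connection noise, retried elsewhere
    return event

sentry_sdk.init(
    dsn="https://publickey@o0.ingest.sentry.io/0",  # placeholder DSN
    before_send=drop_noisy_log_events,
)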
@@ -45,7 +45,6 @@ from api.models import (
|
||||
ComplianceRequirementOverview,
|
||||
DailySeveritySummary,
|
||||
Finding,
|
||||
FindingGroupDailySummary,
|
||||
Integration,
|
||||
Invitation,
|
||||
LighthouseProviderConfiguration,
@@ -808,6 +807,63 @@ class TestTenantViewSet:
|
||||
)
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
|
||||
def test_tenants_list_no_permissions(
|
||||
self, authenticated_client_no_permissions_rbac, tenants_fixture
|
||||
):
|
||||
response = authenticated_client_no_permissions_rbac.get(reverse("tenant-list"))
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
|
||||
def test_tenants_retrieve_no_permissions(
|
||||
self, authenticated_client_no_permissions_rbac, tenants_fixture
|
||||
):
|
||||
tenant1, *_ = tenants_fixture
|
||||
response = authenticated_client_no_permissions_rbac.get(
|
||||
reverse("tenant-detail", kwargs={"pk": tenant1.id})
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
|
||||
def test_tenants_create_no_permissions(
|
||||
self, authenticated_client_no_permissions_rbac, valid_tenant_payload
|
||||
):
|
||||
response = authenticated_client_no_permissions_rbac.post(
|
||||
reverse("tenant-list"),
|
||||
data=valid_tenant_payload,
|
||||
format="json",
|
||||
)
|
||||
assert response.status_code == status.HTTP_201_CREATED
|
||||
|
||||
def test_tenants_partial_update_no_permissions(
|
||||
self, authenticated_client_no_permissions_rbac, tenants_fixture
|
||||
):
|
||||
tenant1, *_ = tenants_fixture
|
||||
payload = {
|
||||
"data": {
|
||||
"type": "tenants",
|
||||
"id": str(tenant1.id),
|
||||
"attributes": {"name": "Unauthorized update"},
|
||||
},
|
||||
}
|
||||
response = authenticated_client_no_permissions_rbac.patch(
|
||||
reverse("tenant-detail", kwargs={"pk": tenant1.id}),
|
||||
data=payload,
|
||||
content_type=API_JSON_CONTENT_TYPE,
|
||||
)
|
||||
assert response.status_code == status.HTTP_403_FORBIDDEN
|
||||
|
||||
@patch("api.v1.views.delete_tenant_task.apply_async")
|
||||
def test_tenants_delete_no_permissions(
|
||||
self,
|
||||
delete_tenant_mock,
|
||||
authenticated_client_no_permissions_rbac,
|
||||
tenants_fixture,
|
||||
):
|
||||
tenant1, *_ = tenants_fixture
|
||||
response = authenticated_client_no_permissions_rbac.delete(
|
||||
reverse("tenant-detail", kwargs={"pk": tenant1.id})
|
||||
)
|
||||
assert response.status_code == status.HTTP_403_FORBIDDEN
|
||||
delete_tenant_mock.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestMembershipViewSet:
@@ -14691,14 +14747,14 @@ class TestMuteRuleViewSet:
|
||||
assert data[0]["id"] == str(mute_rules_fixture[first_index].id)
|
||||
|
||||
@patch("api.v1.views.chain")
|
||||
@patch("api.v1.views.aggregate_finding_group_summaries_task.si")
|
||||
@patch("api.v1.views.reaggregate_all_finding_group_summaries_task.si")
|
||||
@patch("api.v1.views.mute_historical_findings_task.si")
|
||||
@patch("api.v1.views.transaction.on_commit", side_effect=lambda fn: fn())
|
||||
def test_mute_rules_create_valid(
|
||||
self,
|
||||
_mock_on_commit,
|
||||
mock_mute_signature,
|
||||
mock_aggregate_signature,
|
||||
mock_reaggregate_signature,
|
||||
mock_chain,
|
||||
authenticated_client,
|
||||
findings_fixture,
@@ -14737,12 +14793,12 @@ class TestMuteRuleViewSet:
|
||||
assert finding.muted_at is not None
|
||||
assert finding.muted_reason == "Security exception approved"
|
||||
|
||||
# Verify background task chain was called
|
||||
# Verify background task chain was called: mute → aggregate → reaggregate all
|
||||
mock_mute_signature.assert_called_once()
|
||||
mock_aggregate_signature.assert_called_once()
|
||||
mock_reaggregate_signature.assert_called_once()
|
||||
mock_chain.assert_called_once_with(
|
||||
mock_mute_signature.return_value,
|
||||
mock_aggregate_signature.return_value,
|
||||
mock_reaggregate_signature.return_value,
|
||||
)
|
||||
mock_chain.return_value.apply_async.assert_called_once()
@@ -15217,6 +15273,29 @@ class TestFindingGroupViewSet:
|
||||
# ec2_instance_public_ip has 1 PASS and 1 FAIL, should aggregate to FAIL
|
||||
assert data[0]["attributes"]["status"] == "FAIL"
|
||||
|
||||
def test_finding_groups_region_filter_reaggregates_metrics(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test finding-level filters recompute group metrics from matching findings."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-list"),
|
||||
{
|
||||
"filter[inserted_at]": TODAY,
|
||||
"filter[check_id]": "ec2_instance_public_ip",
|
||||
"filter[region]": "us-east-1",
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 1
|
||||
|
||||
attrs = data[0]["attributes"]
|
||||
assert attrs["status"] == "PASS"
|
||||
assert attrs["pass_count"] == 1
|
||||
assert attrs["fail_count"] == 0
|
||||
assert attrs["resources_total"] == 1
|
||||
assert attrs["resources_fail"] == 0
|
||||
|
||||
def test_finding_groups_status_pass_when_no_fail(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
@@ -15245,6 +15324,182 @@ class TestFindingGroupViewSet:
|
||||
# rds_encryption has all muted findings
|
||||
assert data[0]["attributes"]["status"] == "MUTED"
|
||||
|
||||
def test_finding_groups_status_filter(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test finding groups can be filtered by aggregated status."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-list"),
|
||||
{"filter[inserted_at]": TODAY, "filter[status]": "FAIL"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
assert all(item["attributes"]["status"] == "FAIL" for item in data)
|
||||
|
||||
def test_finding_groups_status_in_filter(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test finding groups support status__in filter on aggregated status."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-list"),
|
||||
{"filter[inserted_at]": TODAY, "filter[status__in]": "FAIL,PASS"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
assert all(item["attributes"]["status"] in {"FAIL", "PASS"} for item in data)
|
||||
|
||||
def test_finding_groups_severity_filter(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test finding groups can be filtered by aggregated severity."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-list"),
|
||||
{"filter[inserted_at]": TODAY, "filter[severity]": "critical"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
assert all(item["attributes"]["severity"] == "critical" for item in data)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"endpoint_name", ["finding-group-list", "finding-group-latest"]
|
||||
)
|
||||
def test_finding_groups_combined_region_and_status_filters(
|
||||
self, authenticated_client, finding_groups_fixture, endpoint_name
|
||||
):
|
||||
"""Test combined region + aggregated status filters."""
|
||||
params = {"filter[region]": "us-east-1", "filter[status]": "FAIL"}
|
||||
if endpoint_name == "finding-group-list":
|
||||
params["filter[inserted_at]"] = TODAY
|
||||
|
||||
response = authenticated_client.get(reverse(endpoint_name), params)
|
||||
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
check_ids = {item["id"] for item in data}
|
||||
assert check_ids == {"s3_bucket_public_access", "cloudtrail_enabled"}
|
||||
assert all(item["attributes"]["status"] == "FAIL" for item in data)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"endpoint_name", ["finding-group-list", "finding-group-latest"]
|
||||
)
|
||||
def test_finding_groups_combined_delta_and_severity_filters(
|
||||
self, authenticated_client, finding_groups_fixture, endpoint_name
|
||||
):
|
||||
"""Test combined delta + aggregated severity filters."""
|
||||
params = {"filter[delta]": "new", "filter[severity]": "critical"}
|
||||
if endpoint_name == "finding-group-list":
|
||||
params["filter[inserted_at]"] = TODAY
|
||||
|
||||
response = authenticated_client.get(reverse(endpoint_name), params)
|
||||
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
check_ids = {item["id"] for item in data}
|
||||
assert check_ids == {"s3_bucket_public_access", "cloudtrail_enabled"}
|
||||
assert all(item["attributes"]["severity"] == "critical" for item in data)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"endpoint_name", ["finding-group-list", "finding-group-latest"]
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
"filter_key,filter_value",
|
||||
[
|
||||
("status", "INVALID_STATUS"),
|
||||
("severity", "INVALID_SEVERITY"),
|
||||
],
|
||||
)
|
||||
def test_finding_groups_invalid_status_or_severity_returns_400(
|
||||
self,
|
||||
authenticated_client,
|
||||
finding_groups_fixture,
|
||||
endpoint_name,
|
||||
filter_key,
|
||||
filter_value,
|
||||
):
|
||||
"""Test invalid aggregated status/severity values are rejected."""
|
||||
params = {f"filter[{filter_key}]": filter_value}
|
||||
if endpoint_name == "finding-group-list":
|
||||
params["filter[inserted_at]"] = TODAY
|
||||
|
||||
response = authenticated_client.get(reverse(endpoint_name), params)
|
||||
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert response.json()["errors"][0]["code"] == "invalid"
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"endpoint_name", ["finding-group-list", "finding-group-latest"]
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
"filter_key,filter_value,expected_detail",
|
||||
[
|
||||
("status__in", "FAIL,INVALID_STATUS", "invalid status filter"),
|
||||
("severity__in", "critical,INVALID_SEVERITY", "invalid severity filter"),
|
||||
],
|
||||
)
|
||||
def test_finding_groups_invalid_in_filters_return_400(
|
||||
self,
|
||||
authenticated_client,
|
||||
finding_groups_fixture,
|
||||
endpoint_name,
|
||||
filter_key,
|
||||
filter_value,
|
||||
expected_detail,
|
||||
):
|
||||
"""Test invalid values in status__in/severity__in are rejected."""
|
||||
params = {f"filter[{filter_key}]": filter_value}
|
||||
if endpoint_name == "finding-group-list":
|
||||
params["filter[inserted_at]"] = TODAY
|
||||
|
||||
response = authenticated_client.get(reverse(endpoint_name), params)
|
||||
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
errors = response.json()["errors"]
|
||||
assert errors[0]["code"] == "invalid"
|
||||
assert expected_detail in errors[0]["detail"]
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"filter_name,filter_value",
|
||||
[
|
||||
("region", "__region_does_not_exist__"),
|
||||
("service", "__service_does_not_exist__"),
|
||||
("category", "__category_does_not_exist__"),
|
||||
("resource_groups", "__group_does_not_exist__"),
|
||||
("resource_type", "__type_does_not_exist__"),
|
||||
("scan", "00000000-0000-7000-8000-000000000001"),
|
||||
],
|
||||
)
|
||||
def test_finding_groups_finding_level_filters_are_applied(
|
||||
self,
|
||||
authenticated_client,
|
||||
finding_groups_fixture,
|
||||
filter_name,
|
||||
filter_value,
|
||||
):
|
||||
"""Test finding-level filters are applied in /finding-groups aggregation."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-list"),
|
||||
{"filter[inserted_at]": TODAY, f"filter[{filter_name}]": filter_value},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 0
|
||||
|
||||
def test_finding_groups_delta_filter_is_applied(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test delta filter is applied in /finding-groups aggregation."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-list"),
|
||||
{"filter[inserted_at]": TODAY, "filter[delta]": "new"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
assert all(item["attributes"]["new_count"] > 0 for item in data)
|
||||
|
||||
def test_finding_groups_provider_aggregation(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
@@ -15555,6 +15810,50 @@ class TestFindingGroupViewSet:
|
||||
assert len(data) == 1
|
||||
assert data[0]["id"] == "s3_bucket_public_access"
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"extra_filters",
|
||||
[
|
||||
{},
|
||||
{"filter[muted]": "include"},
|
||||
],
|
||||
ids=["summary_path", "finding_level_path"],
|
||||
)
|
||||
def test_check_title_icontains_includes_all_title_variants(
|
||||
self,
|
||||
authenticated_client,
|
||||
finding_groups_title_variants_fixture,
|
||||
extra_filters,
|
||||
):
|
||||
"""
|
||||
Regression: two providers report the same check_id with different
|
||||
check_title values (e.g. after a Prowler version upgrade). Filtering
|
||||
by check_title__icontains with a term that matches only ONE variant
|
||||
must still return the finding group with counts from BOTH providers.
|
||||
|
||||
Parametrized to cover both aggregation paths:
|
||||
- summary_path: default, uses _CheckTitleToCheckIdMixin on summaries
|
||||
- finding_level_path: filter[muted]=include forces CommonFindingFilters
|
||||
"""
|
||||
params = {
|
||||
"filter[inserted_at]": TODAY,
|
||||
"filter[check_title.icontains]": "Ensure repository",
|
||||
**extra_filters,
|
||||
}
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-list"),
|
||||
params,
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 1
|
||||
assert data[0]["id"] == "github_secret_scanning_enabled"
|
||||
attrs = data[0]["attributes"]
|
||||
# Both providers' findings must be counted
|
||||
assert attrs["fail_count"] == 2, (
|
||||
"fail_count must include findings from both providers, "
|
||||
"regardless of which title variant matches the search"
|
||||
)
|
||||
|
||||
def test_resources_not_found(self, authenticated_client):
|
||||
"""Test 404 returned for nonexistent check_id."""
|
||||
response = authenticated_client.get(
@@ -15596,6 +15895,44 @@ class TestFindingGroupViewSet:
|
||||
assert resource.get("region"), "resource.region must not be empty"
|
||||
assert resource.get("type"), "resource.type must not be empty"
|
||||
|
||||
def test_resources_resource_group(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test resource_group is extracted from check_metadata.resourcegroup."""
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-resources", kwargs={"pk": "s3_bucket_public_access"}
|
||||
),
|
||||
{"filter[inserted_at]": TODAY},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 2
|
||||
for item in data:
|
||||
resource = item["attributes"]["resource"]
|
||||
assert (
|
||||
resource["resource_group"] == "storage"
|
||||
), "resource_group must be 'storage'"
|
||||
|
||||
def test_resources_name_icontains(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test resource_name__icontains filters resources by name substring."""
|
||||
# s3_bucket_public_access has "My Instance 1" and "My Instance 2"
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-resources", kwargs={"pk": "s3_bucket_public_access"}
|
||||
),
|
||||
{
|
||||
"filter[inserted_at]": TODAY,
|
||||
"filter[resource_name.icontains]": "Instance 1",
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 1
|
||||
assert "Instance 1" in data[0]["attributes"]["resource"]["name"]
|
||||
|
||||
def test_resources_provider_info(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
@@ -15674,6 +16011,191 @@ class TestFindingGroupViewSet:
|
||||
# Should still return the 2 resources within the date range
|
||||
assert len(response.json()["data"]) == 2
|
||||
|
||||
def test_resources_status_filter_returns_empty_not_404(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test that filtering by status on a valid check returns empty list, not 404."""
|
||||
# s3_bucket_public_access has only FAIL findings, filtering by PASS should return []
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-resources", kwargs={"pk": "s3_bucket_public_access"}
|
||||
),
|
||||
{"filter[inserted_at]": TODAY, "filter[status]": "PASS"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert response.json()["data"] == []
|
||||
|
||||
def test_resources_nonexistent_check_still_404(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test that a truly nonexistent check_id still returns 404."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-resources", kwargs={"pk": "totally_fake_check"}),
|
||||
{"filter[inserted_at]": TODAY},
|
||||
)
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
|
||||
def test_resources_sort_by_status_ascending(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test sort=status returns PASS before FAIL."""
|
||||
# ec2_instance_public_ip has 1 PASS (resource1) and 1 FAIL (resource2)
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-resources",
|
||||
kwargs={"pk": "ec2_instance_public_ip"},
|
||||
),
|
||||
{"filter[inserted_at]": TODAY, "sort": "status"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 2
|
||||
assert data[0]["attributes"]["status"] == "PASS"
|
||||
assert data[1]["attributes"]["status"] == "FAIL"
|
||||
|
||||
def test_resources_sort_by_status_descending(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test sort=-status returns FAIL before PASS."""
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-resources",
|
||||
kwargs={"pk": "ec2_instance_public_ip"},
|
||||
),
|
||||
{"filter[inserted_at]": TODAY, "sort": "-status"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 2
|
||||
assert data[0]["attributes"]["status"] == "FAIL"
|
||||
assert data[1]["attributes"]["status"] == "PASS"
|
||||
|
||||
def test_resources_sort_invalid_field_returns_400(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test that an invalid sort field returns 400."""
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-resources", kwargs={"pk": "s3_bucket_public_access"}
|
||||
),
|
||||
{"filter[inserted_at]": TODAY, "sort": "invalid_field"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
def test_latest_resources_status_filter_returns_empty_not_404(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test latest resources with status filter on valid check returns empty, not 404."""
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-latest_resources",
|
||||
kwargs={"check_id": "s3_bucket_public_access"},
|
||||
),
|
||||
{"filter[status]": "PASS"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert response.json()["data"] == []
|
||||
|
||||
def test_latest_resources_sort_by_status(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test latest resources sort=status returns PASS before FAIL."""
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-latest_resources",
|
||||
kwargs={"check_id": "ec2_instance_public_ip"},
|
||||
),
|
||||
{"sort": "status"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 2
|
||||
assert data[0]["attributes"]["status"] == "PASS"
|
||||
assert data[1]["attributes"]["status"] == "FAIL"
|
||||
|
||||
def test_resources_nonexistent_check_missing_date_returns_400(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Nonexistent check_id with missing required date filter returns 400, not 404."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-resources", kwargs={"pk": "totally_fake_check"}),
|
||||
)
|
||||
# FindingGroupFilter requires inserted_at — validation fires before existence check
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
def test_resources_nonexistent_check_invalid_sort_returns_400(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Nonexistent check_id with invalid sort returns 400, not 404."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-resources", kwargs={"pk": "totally_fake_check"}),
|
||||
{"filter[inserted_at]": TODAY, "sort": "invalid_field"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
def test_resources_empty_sort_falls_back_to_default_order(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Degenerate sort values should behave like no sort, not raise 500."""
|
||||
all_ids = set()
|
||||
for page_num in (1, 2):
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-resources",
|
||||
kwargs={"pk": "s3_bucket_public_access"},
|
||||
),
|
||||
{
|
||||
"filter[inserted_at]": TODAY,
|
||||
"sort": ",",
|
||||
"page[size]": 1,
|
||||
"page[number]": page_num,
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 1
|
||||
all_ids.add(data[0]["id"])
|
||||
assert len(all_ids) == 2
|
||||
|
||||
def test_resources_sort_pagination_stability(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Sort with small page size returns all resources without duplicates or gaps."""
|
||||
# s3_bucket_public_access has 2 resources, both FAIL — they tie on status
|
||||
all_ids = set()
|
||||
for page_num in (1, 2):
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-resources",
|
||||
kwargs={"pk": "s3_bucket_public_access"},
|
||||
),
|
||||
{
|
||||
"filter[inserted_at]": TODAY,
|
||||
"sort": "status",
|
||||
"page[size]": 1,
|
||||
"page[number]": page_num,
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 1
|
||||
all_ids.add(data[0]["id"])
|
||||
# Both pages should return different resources (no duplicates)
|
||||
assert len(all_ids) == 2
|
||||
|
||||
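The two pagination tests above only pass reliably when the ordering is total. When rows tie on the sort key (both resources here are FAIL), the database may return them in any order on each query, so page 1 and page 2 can overlap or miss a row; appending a unique column such as resource_id as a tie-breaker, which the view code later in this diff does, makes pagination deterministic. A tiny sketch of the idea:

    # Why a tie-breaker matters (sketch): two rows tie on status.
    rows = [{"id": "a", "status": "FAIL"}, {"id": "b", "status": "FAIL"}]

    # Sorting by status alone leaves the relative order unspecified in SQL;
    # sorting by (status, id) is total, so page slices never overlap.
    stable = sorted(rows, key=lambda r: (r["status"], r["id"]))
    page_1, page_2 = stable[:1], stable[1:]
    assert {r["id"] for r in page_1} | {r["id"] for r in page_2} == {"a", "b"}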
def test_latest_resources_nonexistent_check_invalid_sort_returns_400(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Nonexistent check_id with invalid sort on latest returns 400, not 404."""
|
||||
response = authenticated_client.get(
|
||||
reverse(
|
||||
"finding-group-latest_resources",
|
||||
kwargs={"check_id": "totally_fake_check"},
|
||||
),
|
||||
{"sort": "invalid_field"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
# Test provider_id filter actually filters data
|
||||
def test_finding_groups_provider_id_filter_actually_filters(
|
||||
self, authenticated_client, finding_groups_fixture, providers_fixture
|
||||
@@ -15853,47 +16375,257 @@ class TestFindingGroupViewSet:
|
||||
assert len(data) == 1
|
||||
assert data[0]["id"] == "cloudtrail_enabled"
|
||||
|
||||
def test_finding_groups_latest_aggregates_latest_per_provider(
|
||||
self, authenticated_client, providers_fixture
|
||||
def test_finding_groups_latest_status_filter(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test /latest aggregates latest summary from each provider for the same check."""
|
||||
"""Test /latest supports status filter on aggregated status."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{"filter[status]": "FAIL"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
assert all(item["attributes"]["status"] == "FAIL" for item in data)
|
||||
|
||||
def test_finding_groups_latest_region_filter_reaggregates_metrics(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test /latest recomputes metrics from findings matching region filter."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{
|
||||
"filter[check_id]": "ec2_instance_public_ip",
|
||||
"filter[region]": "us-east-1",
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 1
|
||||
|
||||
attrs = data[0]["attributes"]
|
||||
assert attrs["status"] == "PASS"
|
||||
assert attrs["pass_count"] == 1
|
||||
assert attrs["fail_count"] == 0
|
||||
assert attrs["resources_total"] == 1
|
||||
assert attrs["resources_fail"] == 0
|
||||
|
||||
def test_finding_groups_latest_status_in_filter(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test /latest supports status__in filter on aggregated status."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{"filter[status__in]": "FAIL,PASS"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
assert all(item["attributes"]["status"] in {"FAIL", "PASS"} for item in data)
|
||||
|
||||
def test_finding_groups_latest_severity_filter(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test /latest supports severity filter on aggregated severity."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{"filter[severity]": "critical"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
assert all(item["attributes"]["severity"] == "critical" for item in data)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"filter_name,filter_value",
|
||||
[
|
||||
("region", "__region_does_not_exist__"),
|
||||
("service", "__service_does_not_exist__"),
|
||||
("category", "__category_does_not_exist__"),
|
||||
("resource_groups", "__group_does_not_exist__"),
|
||||
("resource_type", "__type_does_not_exist__"),
|
||||
("scan", "00000000-0000-7000-8000-000000000001"),
|
||||
],
|
||||
)
|
||||
def test_finding_groups_latest_finding_level_filters_are_applied(
|
||||
self,
|
||||
authenticated_client,
|
||||
finding_groups_fixture,
|
||||
filter_name,
|
||||
filter_value,
|
||||
):
|
||||
"""Test finding-level filters are applied in /finding-groups/latest aggregation."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{f"filter[{filter_name}]": filter_value},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 0
|
||||
|
||||
def test_finding_groups_check_title_filter_applies_with_delta(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test check_title filter is honored when finding-level path is used."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-list"),
|
||||
{
|
||||
"filter[inserted_at]": TODAY,
|
||||
"filter[delta]": "new",
|
||||
"filter[check_title.icontains]": "__missing_check_title__",
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 0
|
||||
|
||||
def test_finding_groups_latest_check_title_filter_applies_with_delta(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test /latest check_title filter is honored on finding-level path."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{
|
||||
"filter[delta]": "new",
|
||||
"filter[check_title.icontains]": "__missing_check_title__",
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 0
|
||||
|
||||
def test_finding_groups_latest_delta_filter_is_applied(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test delta filter is applied in /finding-groups/latest aggregation."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{"filter[delta]": "new"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
assert all(item["attributes"]["new_count"] > 0 for item in data)
|
||||
|
||||
def test_finding_groups_latest_aggregates_latest_per_provider(
|
||||
self,
|
||||
authenticated_client,
|
||||
providers_fixture,
|
||||
resources_fixture,
|
||||
):
|
||||
"""Test /latest keeps all findings from the latest scan per provider.
|
||||
|
||||
Verifies that when the latest scan produces multiple findings for the
|
||||
same check_id (e.g. one per resource), all of them are included in the
|
||||
aggregation — not just one.
|
||||
"""
|
||||
provider1 = providers_fixture[0]
|
||||
provider2 = providers_fixture[1]
|
||||
|
||||
resource1 = resources_fixture[0]
|
||||
resource2 = resources_fixture[1]
|
||||
resource3 = resources_fixture[2]
|
||||
check_id = "cross_provider_latest_resources_total"
|
||||
now = datetime.now(timezone.utc).replace(minute=0, second=0, microsecond=0)
|
||||
|
||||
FindingGroupDailySummary.objects.create(
|
||||
latest_scan_provider1 = Scan.objects.create(
|
||||
tenant_id=provider1.tenant_id,
|
||||
provider=provider1,
|
||||
check_id=check_id,
|
||||
inserted_at=now - timedelta(days=1),
|
||||
resources_total=20,
|
||||
resources_fail=20,
|
||||
fail_count=20,
|
||||
state=StateChoices.COMPLETED,
|
||||
trigger=Scan.TriggerChoices.MANUAL,
|
||||
completed_at=datetime.now(timezone.utc),
|
||||
)
|
||||
FindingGroupDailySummary.objects.create(
|
||||
|
||||
latest_scan_provider2 = Scan.objects.create(
|
||||
tenant_id=provider2.tenant_id,
|
||||
provider=provider2,
|
||||
check_id=check_id,
|
||||
inserted_at=now,
|
||||
resources_total=7,
|
||||
resources_fail=7,
|
||||
fail_count=7,
|
||||
state=StateChoices.COMPLETED,
|
||||
trigger=Scan.TriggerChoices.MANUAL,
|
||||
completed_at=datetime.now(timezone.utc),
|
||||
)
|
||||
|
||||
older_scan_provider1 = Scan.objects.create(
|
||||
tenant_id=provider1.tenant_id,
|
||||
provider=provider1,
|
||||
state=StateChoices.COMPLETED,
|
||||
trigger=Scan.TriggerChoices.MANUAL,
|
||||
completed_at=datetime.now(timezone.utc) - timedelta(days=1),
|
||||
)
|
||||
|
||||
# Older scan — these should be excluded from /latest
|
||||
Finding.objects.create(
|
||||
tenant_id=provider1.tenant_id,
|
||||
uid="old_cross_provider_1",
|
||||
scan=older_scan_provider1,
|
||||
delta="new",
|
||||
status="FAIL",
|
||||
severity="high",
|
||||
impact="high",
|
||||
check_id=check_id,
|
||||
check_metadata={"CheckId": check_id, "checktitle": "Cross provider check"},
|
||||
first_seen_at=datetime.now(timezone.utc) - timedelta(days=2),
|
||||
muted=False,
|
||||
)
|
||||
|
||||
# Latest scan provider1: TWO findings (PASS + FAIL) for the same check
|
||||
latest_p1_pass = Finding.objects.create(
|
||||
tenant_id=provider1.tenant_id,
|
||||
uid="latest_cross_provider_1_pass",
|
||||
scan=latest_scan_provider1,
|
||||
delta="new",
|
||||
status="PASS",
|
||||
severity="high",
|
||||
impact="high",
|
||||
check_id=check_id,
|
||||
check_metadata={"CheckId": check_id, "checktitle": "Cross provider check"},
|
||||
first_seen_at=datetime.now(timezone.utc) - timedelta(hours=1),
|
||||
muted=False,
|
||||
)
|
||||
latest_p1_pass.add_resources([resource1])
|
||||
|
||||
latest_p1_fail = Finding.objects.create(
|
||||
tenant_id=provider1.tenant_id,
|
||||
uid="latest_cross_provider_1_fail",
|
||||
scan=latest_scan_provider1,
|
||||
delta="new",
|
||||
status="FAIL",
|
||||
severity="high",
|
||||
impact="high",
|
||||
check_id=check_id,
|
||||
check_metadata={"CheckId": check_id, "checktitle": "Cross provider check"},
|
||||
first_seen_at=datetime.now(timezone.utc) - timedelta(hours=1),
|
||||
muted=False,
|
||||
)
|
||||
latest_p1_fail.add_resources([resource2])
|
||||
|
||||
# Latest scan provider2: one finding
|
||||
latest_p2 = Finding.objects.create(
|
||||
tenant_id=provider2.tenant_id,
|
||||
uid="latest_cross_provider_2",
|
||||
scan=latest_scan_provider2,
|
||||
delta="new",
|
||||
status="FAIL",
|
||||
severity="high",
|
||||
impact="high",
|
||||
check_id=check_id,
|
||||
check_metadata={"CheckId": check_id, "checktitle": "Cross provider check"},
|
||||
first_seen_at=datetime.now(timezone.utc) - timedelta(hours=1),
|
||||
muted=False,
|
||||
)
|
||||
latest_p2.add_resources([resource3])
|
||||
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{"filter[check_id]": check_id},
|
||||
{"filter[check_id]": check_id, "filter[delta]": "new"},
|
||||
)
|
||||
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) == 1
|
||||
attrs = data[0]["attributes"]
|
||||
assert attrs["resources_total"] == 27
|
||||
assert attrs["resources_fail"] == 27
|
||||
assert attrs["fail_count"] == 27
|
||||
# 3 findings total: 2 from provider1 latest + 1 from provider2 latest
|
||||
assert attrs["pass_count"] == 1
|
||||
assert attrs["fail_count"] == 2
|
||||
assert attrs["resources_total"] == 3
|
||||
assert attrs["resources_fail"] == 2
|
||||
|
||||
def test_finding_groups_latest_provider_type_filter(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
@@ -15934,6 +16666,44 @@ class TestFindingGroupViewSet:
|
||||
check_ids = [item["id"] for item in data]
|
||||
assert check_ids == sorted(check_ids)
|
||||
|
||||
def test_finding_groups_latest_sort_by_check_title(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
"""Test /latest supports sorting by check_title."""
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-group-latest"),
|
||||
{"sort": "check_title"},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
check_titles = [item["attributes"]["check_title"] for item in data]
|
||||
assert check_titles == sorted(check_titles)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"endpoint_name", ["finding-group-list", "finding-group-latest"]
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
"sort_field",
|
||||
["first_seen_at", "-first_seen_at", "last_seen_at", "failing_since"],
|
||||
)
|
||||
def test_finding_groups_sort_by_time_fields(
|
||||
self,
|
||||
authenticated_client,
|
||||
finding_groups_fixture,
|
||||
endpoint_name,
|
||||
sort_field,
|
||||
):
|
||||
"""Test sorting by aggregated time fields (first_seen_at, last_seen_at, failing_since)."""
|
||||
params = {"sort": sort_field}
|
||||
if endpoint_name == "finding-group-list":
|
||||
params["filter[inserted_at]"] = TODAY
|
||||
|
||||
response = authenticated_client.get(reverse(endpoint_name), params)
|
||||
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
data = response.json()["data"]
|
||||
assert len(data) > 0
|
||||
|
||||
def test_finding_groups_latest_ignores_date_filters(
|
||||
self, authenticated_client, finding_groups_fixture
|
||||
):
|
||||
|
||||
@@ -4180,6 +4180,7 @@ class FindingGroupResourceSerializer(BaseSerializerV1):
severity = serializers.CharField()
first_seen_at = serializers.DateTimeField(required=False, allow_null=True)
last_seen_at = serializers.DateTimeField(required=False, allow_null=True)
muted_reason = serializers.CharField(required=False, allow_null=True)

class JSONAPIMeta:
resource_name = "finding-group-resources"
@@ -4193,6 +4194,7 @@ class FindingGroupResourceSerializer(BaseSerializerV1):
"service": {"type": "string"},
"region": {"type": "string"},
"type": {"type": "string"},
"resource_group": {"type": "string"},
},
}
)
@@ -4204,6 +4206,7 @@ class FindingGroupResourceSerializer(BaseSerializerV1):
"service": obj.get("resource_service", ""),
"region": obj.get("resource_region", ""),
"type": obj.get("resource_type", ""),
"resource_group": obj.get("resource_group", ""),
}

@extend_schema_field(
|
||||
@@ -31,6 +31,7 @@ from django.contrib.postgres.search import SearchQuery
|
||||
from django.db import transaction
|
||||
from django.db.models import (
|
||||
Case,
|
||||
CharField,
|
||||
Count,
|
||||
DecimalField,
|
||||
ExpressionWrapper,
|
||||
@@ -47,7 +48,8 @@ from django.db.models import (
|
||||
When,
|
||||
Window,
|
||||
)
|
||||
from django.db.models.functions import Coalesce, RowNumber
|
||||
from django.db.models.fields.json import KeyTextTransform
|
||||
from django.db.models.functions import Cast, Coalesce, RowNumber
|
||||
from django.http import HttpResponse, QueryDict
|
||||
from django.shortcuts import redirect
|
||||
from django.urls import reverse
|
||||
@@ -82,7 +84,6 @@ from tasks.beat import schedule_provider_scan
|
||||
from tasks.jobs.attack_paths import db_utils as attack_paths_db_utils
|
||||
from tasks.jobs.export import get_s3_client
|
||||
from tasks.tasks import (
|
||||
aggregate_finding_group_summaries_task,
|
||||
backfill_compliance_summaries_task,
|
||||
backfill_scan_resource_summaries_task,
|
||||
check_integration_connection_task,
|
||||
@@ -94,6 +95,7 @@ from tasks.tasks import (
|
||||
jira_integration_task,
|
||||
mute_historical_findings_task,
|
||||
perform_scan_task,
|
||||
reaggregate_all_finding_group_summaries_task,
|
||||
refresh_lighthouse_provider_models_task,
|
||||
)
|
||||
|
||||
@@ -124,6 +126,7 @@ from api.filters import (
|
||||
CustomDjangoFilterBackend,
|
||||
DailySeveritySummaryFilter,
|
||||
FindingFilter,
|
||||
FindingGroupAggregatedComputedFilter,
|
||||
FindingGroupFilter,
|
||||
FindingGroupSummaryFilter,
|
||||
IntegrationFilter,
|
||||
@@ -409,7 +412,7 @@ class SchemaView(SpectacularAPIView):
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
spectacular_settings.TITLE = "Prowler API"
|
||||
spectacular_settings.VERSION = "1.23.0"
|
||||
spectacular_settings.VERSION = "1.24.0"
|
||||
spectacular_settings.DESCRIPTION = (
|
||||
"Prowler API specification.\n\nThis file is auto-generated."
|
||||
)
|
||||
@@ -1208,6 +1211,17 @@ class TenantViewSet(BaseTenantViewset):
|
||||
# RBAC required permissions
|
||||
required_permissions = [Permissions.MANAGE_ACCOUNT]
|
||||
|
||||
def set_required_permissions(self):
|
||||
"""
|
||||
Returns the required permissions based on the request method.
|
||||
"""
|
||||
if self.action in ("list", "retrieve", "create"):
|
||||
# No permissions required for listing, retrieving or creating tenants
|
||||
self.required_permissions = []
|
||||
else:
|
||||
# Require MANAGE_ACCOUNT for update and delete
|
||||
self.required_permissions = [Permissions.MANAGE_ACCOUNT]
|
||||
|
||||
def get_queryset(self):
|
||||
queryset = Tenant.objects.filter(membership__user=self.request.user)
|
||||
return queryset.prefetch_related("memberships")
|
||||
@@ -3469,7 +3483,7 @@ class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):
|
||||
request,
|
||||
filtered_queryset,
|
||||
manager=Finding.all_objects,
|
||||
select_related=["scan"],
|
||||
select_related=["scan__provider"],
|
||||
prefetch_related=["resources"],
|
||||
)
|
||||
|
||||
@@ -3639,7 +3653,7 @@ class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):
|
||||
tenant_id = request.tenant_id
|
||||
filtered_queryset = self.filter_queryset(self.get_queryset())
|
||||
|
||||
latest_scan_ids = (
|
||||
latest_scan_ids = list(
|
||||
Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
|
||||
.order_by("provider_id", "-inserted_at")
|
||||
.distinct("provider_id")
|
||||
@@ -3653,7 +3667,7 @@ class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):
|
||||
request,
|
||||
filtered_queryset,
|
||||
manager=Finding.all_objects,
|
||||
select_related=["scan"],
|
||||
select_related=["scan__provider"],
|
||||
prefetch_related=["resources"],
|
||||
)
|
||||
|
||||
@@ -6726,23 +6740,15 @@ class MuteRuleViewSet(BaseRLSViewSet):
|
||||
muted_reason=mute_rule.reason,
|
||||
)
|
||||
|
||||
# Launch background task for historical muting
|
||||
latest_scan_id = (
|
||||
Scan.objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
|
||||
.order_by("-completed_at", "-inserted_at")
|
||||
.values_list("id", flat=True)
|
||||
.first()
|
||||
)
|
||||
|
||||
# Launch background task for historical muting + reaggregation
|
||||
transaction.on_commit(
|
||||
lambda: chain(
|
||||
mute_historical_findings_task.si(
|
||||
tenant_id=tenant_id,
|
||||
mute_rule_id=str(mute_rule.id),
|
||||
),
|
||||
aggregate_finding_group_summaries_task.si(
|
||||
reaggregate_all_finding_group_summaries_task.si(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=str(latest_scan_id),
|
||||
),
|
||||
).apply_async()
|
||||
)
|
||||
@@ -6786,13 +6792,13 @@ class FindingGroupViewSet(BaseRLSViewSet):
security analysts to see which checks are failing across their
infrastructure without scrolling through thousands of individual findings.

Uses pre-aggregated FindingGroupDailySummary table for efficient queries.
Daily summaries are re-aggregated across the requested date range.
Uses a hybrid strategy: pre-aggregated daily summaries when possible,
and raw findings when finding-level filters require precise subset metrics.
"""

queryset = FindingGroupDailySummary.objects.all()
serializer_class = FindingGroupSerializer
filterset_class = FindingGroupSummaryFilter
filterset_class = FindingGroupFilter
filter_backends = [
jsonapi_filters.QueryParameterValidationFilter,
jsonapi_filters.OrderingFilter,
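The hybrid strategy described in the docstring above is easiest to see from the client side. A minimal illustrative sketch (not part of the diff) using DRF's test client; the route name and filter parameters are the ones exercised by the tests earlier in this changeset, and the comments about which path each request takes reflect the _requires_finding_level_aggregation logic introduced below:

    # Illustrative only; a real client would also need authentication.
    from django.urls import reverse
    from rest_framework.test import APIClient

    client = APIClient()

    # Summary-supported filters only: answered from FindingGroupDailySummary rows.
    client.get(reverse("finding-group-list"), {"filter[inserted_at]": "2025-01-01"})

    # A finding-level filter (e.g. region) forces aggregation over raw Finding rows,
    # so the returned counts reflect exactly the filtered subset.
    client.get(
        reverse("finding-group-list"),
        {"filter[inserted_at]": "2025-01-01", "filter[region]": "us-east-1"},
    )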
@@ -6811,12 +6817,12 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
affects the OpenAPI schema generated by drf-spectacular.
|
||||
"""
|
||||
if self.action == "latest":
|
||||
return LatestFindingGroupSummaryFilter
|
||||
return LatestFindingGroupFilter
|
||||
if self.action == "resources":
|
||||
return FindingGroupFilter
|
||||
if self.action == "latest_resources":
|
||||
return LatestFindingGroupFilter
|
||||
return FindingGroupSummaryFilter
|
||||
return FindingGroupFilter
|
||||
|
||||
def get_queryset(self):
|
||||
"""Get the base FindingGroupDailySummary queryset with RLS filtering."""
|
||||
@@ -6872,8 +6878,15 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
"resource_type__icontains": "type__icontains",
|
||||
}
|
||||
|
||||
# Fields accepted directly by LatestResourceFilter (no translation needed)
|
||||
_RESOURCE_FILTER_FIELDS = {
|
||||
f"{field}__{lookup}"
|
||||
for field, lookups in LatestResourceFilter.Meta.fields.items()
|
||||
for lookup in lookups
|
||||
} | set(LatestResourceFilter.Meta.fields.keys())
|
||||
|
||||
def _split_resource_filters(self, params: QueryDict) -> tuple[QueryDict, QueryDict]:
|
||||
resource_keys = set(self.RESOURCE_FILTER_MAP)
|
||||
resource_keys = set(self.RESOURCE_FILTER_MAP) | self._RESOURCE_FILTER_FIELDS
|
||||
finding_params = QueryDict(mutable=True)
|
||||
resource_params = QueryDict(mutable=True)
|
||||
for key, values in params.lists():
|
||||
@@ -6894,11 +6907,16 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
queryset = queryset.filter(tenant_id=tenant_id)
|
||||
|
||||
filter_params = QueryDict(mutable=True)
|
||||
for key, mapped_key in self.RESOURCE_FILTER_MAP.items():
|
||||
if key not in params:
|
||||
for key, values in params.lists():
|
||||
# Translate resource_* prefixed keys via the map
|
||||
if key in self.RESOURCE_FILTER_MAP:
|
||||
mapped_key = self.RESOURCE_FILTER_MAP[key]
|
||||
elif key in self._RESOURCE_FILTER_FIELDS:
|
||||
mapped_key = key
|
||||
else:
|
||||
continue
|
||||
|
||||
if key == "resources" or key.endswith("__in"):
|
||||
values = params.getlist(key)
|
||||
items: list[str] = []
|
||||
for value in values:
|
||||
if value is None:
|
||||
@@ -6923,20 +6941,27 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
|
||||
return filterset.qs.values("id")
|
||||
|
||||
def _get_finding_level_filter_keys(self, latest: bool = False) -> set[str]:
"""Derive filters that require querying raw findings."""
summary_filterset = (
LatestFindingGroupSummaryFilter if latest else FindingGroupSummaryFilter
)
finding_filterset = LatestFindingGroupFilter if latest else FindingGroupFilter

summary_supported = set(summary_filterset.base_filters.keys())
finding_supported = set(finding_filterset.base_filters.keys())
return finding_supported - summary_supported

def _requires_finding_level_aggregation(
self, params: QueryDict, latest: bool = False
) -> bool:
finding_level_keys = self._get_finding_level_filter_keys(latest=latest)
return any(key in finding_level_keys for key in params.keys())

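The set difference above is what decides the code path. A self-contained sketch of the same decision; the filter names here are hypothetical stand-ins, not the real filterset fields:

    # Hypothetical filter names for illustration only.
    summary_supported = {"inserted_at", "check_id", "severity", "provider_id"}
    finding_supported = summary_supported | {"region", "service", "delta", "check_title__icontains"}

    finding_level_keys = finding_supported - summary_supported
    # {"region", "service", "delta", "check_title__icontains"}

    params = {"inserted_at": "2025-01-01", "region": "us-east-1"}
    requires_finding_level = any(key in finding_level_keys for key in params)
    # True: "region" is only answerable from raw findings.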
def _aggregate_daily_summaries(self, queryset):
"""
Re-aggregate daily summaries across the date range.

Takes pre-computed daily summaries and aggregates them by check_id
to produce totals across the selected date range.
"""
from django.db.models import CharField
from django.db.models.functions import Cast

"""Re-aggregate summary rows by check_id."""
return queryset.values("check_id").annotate(
|
||||
# Max severity across days
|
||||
severity_order=Max("severity_order"),
|
||||
# Sum counts across days
|
||||
pass_count=Sum("pass_count"),
|
||||
fail_count=Sum("fail_count"),
|
||||
muted_count=Sum("muted_count"),
|
||||
@@ -6944,22 +6969,99 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
changed_count=Sum("changed_count"),
|
||||
resources_total=Sum("resources_total"),
|
||||
resources_fail=Sum("resources_fail"),
|
||||
# Collect provider types using StringAgg (cast enum to text first)
|
||||
impacted_providers_str=StringAgg(
|
||||
Cast("provider__provider", CharField()),
|
||||
delimiter=",",
|
||||
distinct=True,
|
||||
default="",
|
||||
),
|
||||
# Min/Max timing across days
|
||||
first_seen_at=Min("first_seen_at"),
|
||||
last_seen_at=Max("last_seen_at"),
|
||||
failing_since=Min("failing_since"),
|
||||
# Get check metadata from first row (same for all days)
|
||||
agg_first_seen_at=Min("first_seen_at"),
|
||||
agg_last_seen_at=Max("last_seen_at"),
|
||||
agg_failing_since=Min("failing_since"),
|
||||
check_title=Max("check_title"),
|
||||
check_description=Max("check_description"),
|
||||
)
|
||||
|
||||
def _aggregate_findings(self, queryset):
|
||||
"""Aggregate findings by check_id for finding-group endpoints."""
|
||||
severity_case = Case(
|
||||
*[
|
||||
When(severity=severity, then=Value(order))
|
||||
for severity, order in SEVERITY_ORDER.items()
|
||||
],
|
||||
output_field=IntegerField(),
|
||||
)
|
||||
|
||||
return queryset.values("check_id").annotate(
|
||||
severity_order=Max(severity_case),
|
||||
pass_count=Count("id", filter=Q(status="PASS", muted=False)),
|
||||
fail_count=Count("id", filter=Q(status="FAIL", muted=False)),
|
||||
muted_count=Count("id", filter=Q(muted=True)),
|
||||
new_count=Count("id", filter=Q(delta="new", muted=False)),
|
||||
changed_count=Count("id", filter=Q(delta="changed", muted=False)),
|
||||
resources_total=Count("resources__id", distinct=True),
|
||||
resources_fail=Count(
|
||||
"resources__id",
|
||||
distinct=True,
|
||||
filter=Q(status="FAIL", muted=False),
|
||||
),
|
||||
impacted_providers_str=StringAgg(
|
||||
Cast("scan__provider__provider", CharField()),
|
||||
delimiter=",",
|
||||
distinct=True,
|
||||
default="",
|
||||
),
|
||||
agg_first_seen_at=Min("first_seen_at"),
|
||||
agg_last_seen_at=Max("inserted_at"),
|
||||
agg_failing_since=Min(
|
||||
"first_seen_at", filter=Q(status="FAIL", muted=False)
|
||||
),
|
||||
check_title=Coalesce(
|
||||
Max(KeyTextTransform("checktitle", "check_metadata")),
|
||||
Max(KeyTextTransform("CheckTitle", "check_metadata")),
|
||||
Max(KeyTextTransform("Checktitle", "check_metadata")),
|
||||
),
|
||||
check_description=Coalesce(
|
||||
Max(KeyTextTransform("description", "check_metadata")),
|
||||
Max(KeyTextTransform("Description", "check_metadata")),
|
||||
),
|
||||
)
|
||||
|
||||
def _split_computed_aggregate_filters(
self, params: QueryDict
) -> tuple[QueryDict, QueryDict]:
"""Split finding filters from computed aggregate filters."""
computed_keys = {
"status",
"status__in",
"severity",
"severity__in",
"include_muted",
}
finding_params = QueryDict(mutable=True)
computed_params = QueryDict(mutable=True)

for key, values in params.lists():
if key in computed_keys:
computed_params.setlist(key, values)
else:
finding_params.setlist(key, values)

return finding_params, computed_params

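A quick sketch of what this split produces for a typical request; QueryDict behaves like a multi-valued dict, and the parameter names come from the tests above:

    # Illustrative only.
    from django.http import QueryDict

    params = QueryDict(mutable=True)
    params.update({"inserted_at": "2025-01-01", "region": "us-east-1", "status": "FAIL"})

    # After _split_computed_aggregate_filters(params):
    #   finding_params  -> {"inserted_at": "2025-01-01", "region": "us-east-1"}
    #   computed_params -> {"status": "FAIL"}
    # finding_params drive the summary/finding aggregation;
    # computed_params are applied afterwards to the aggregated rows.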
def _get_latest_findings_per_provider(self, filtered_queryset):
"""Keep only findings from each provider's most recent completed scan."""
latest_scan_ids = (
Scan.objects.filter(
tenant_id=self.request.tenant_id,
state=StateChoices.COMPLETED,
)
.order_by("provider_id", "-completed_at", "-inserted_at")
.distinct("provider_id")
.values("id")
)
return filtered_queryset.filter(scan_id__in=latest_scan_ids)

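For readers less familiar with Django's distinct("provider_id") (PostgreSQL DISTINCT ON), a pure-Python sketch of the same selection, keeping the scan with the highest (completed_at, inserted_at) per provider; it assumes both timestamps are non-null:

    # Pure-Python equivalent of the DISTINCT ON ("provider_id") query above (sketch).
    def latest_scan_id_per_provider(scans):
        """scans: iterable of objects with provider_id, completed_at, inserted_at, id."""
        best = {}
        for scan in scans:
            rank = (scan.completed_at, scan.inserted_at)
            if scan.provider_id not in best or rank > best[scan.provider_id][0]:
                best[scan.provider_id] = (rank, scan.id)
        return {scan_id for _, scan_id in best.values()}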
def _post_process_aggregation(self, aggregated_data):
|
||||
"""
|
||||
Post-process aggregation results to add computed fields.
|
||||
@@ -6976,6 +7078,13 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
severity_order, "informational"
|
||||
)
|
||||
|
||||
if "agg_first_seen_at" in row:
|
||||
row["first_seen_at"] = row.pop("agg_first_seen_at")
|
||||
if "agg_last_seen_at" in row:
|
||||
row["last_seen_at"] = row.pop("agg_last_seen_at")
|
||||
if "agg_failing_since" in row:
|
||||
row["failing_since"] = row.pop("agg_failing_since")
|
||||
|
||||
# Compute aggregated status
|
||||
if row.get("fail_count", 0) > 0:
|
||||
row["status"] = "FAIL"
|
||||
@@ -6994,22 +7103,40 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
|
||||
return results
|
||||
|
||||
def _validate_sort_fields(self, sort_param):
|
||||
"""Validate and map JSON:API sort fields for aggregated finding groups."""
|
||||
sort_field_map = {
|
||||
"check_id": "check_id",
|
||||
"severity": "severity_order",
|
||||
"fail_count": "fail_count",
|
||||
"pass_count": "pass_count",
|
||||
"muted_count": "muted_count",
|
||||
"new_count": "new_count",
|
||||
"changed_count": "changed_count",
|
||||
"resources_total": "resources_total",
|
||||
"resources_fail": "resources_fail",
|
||||
"first_seen_at": "first_seen_at",
|
||||
"last_seen_at": "last_seen_at",
|
||||
"failing_since": "failing_since",
|
||||
}
|
||||
_FINDING_GROUP_SORT_MAP = {
|
||||
"check_id": "check_id",
|
||||
"check_title": "check_title",
|
||||
"severity": "severity_order",
|
||||
"fail_count": "fail_count",
|
||||
"pass_count": "pass_count",
|
||||
"muted_count": "muted_count",
|
||||
"new_count": "new_count",
|
||||
"changed_count": "changed_count",
|
||||
"resources_total": "resources_total",
|
||||
"resources_fail": "resources_fail",
|
||||
"first_seen_at": "agg_first_seen_at",
|
||||
"last_seen_at": "agg_last_seen_at",
|
||||
"failing_since": "agg_failing_since",
|
||||
}
|
||||
|
||||
_RESOURCE_SORT_MAP = {
|
||||
"status": "status_order",
|
||||
"severity": "severity_order",
|
||||
"first_seen_at": "first_seen_at",
|
||||
"last_seen_at": "last_seen_at",
|
||||
"resource.uid": "resource_uid",
|
||||
"resource.name": "resource_name",
|
||||
"resource.region": "resource_region",
|
||||
"resource.service": "resource_service",
|
||||
"resource.type": "resource_type",
|
||||
"provider.uid": "provider_uid",
|
||||
"provider.alias": "provider_alias",
|
||||
}
|
||||
|
||||
def _validate_sort_fields(self, sort_param, sort_field_map=None):
|
||||
"""Validate and map JSON:API sort fields using the given field map."""
|
||||
if sort_field_map is None:
|
||||
sort_field_map = self._FINDING_GROUP_SORT_MAP
|
||||
|
||||
ordering = []
|
||||
for field in sort_param.split(","):
|
||||
@@ -7019,7 +7146,6 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
is_desc = field.startswith("-")
|
||||
raw_field = field[1:] if is_desc else field
|
||||
if raw_field not in sort_field_map:
|
||||
# Validate sort fields explicitly to return JSON:API 400 instead of FieldError.
|
||||
raise ValidationError(
|
||||
[
|
||||
{
|
||||
@@ -7035,6 +7161,33 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
|
||||
return ordering
|
||||
|
||||
def _apply_aggregated_computed_filters(self, queryset, computed_params: QueryDict):
"""Apply computed filters (status/severity) on aggregated finding-group rows."""
if not computed_params:
return queryset

if computed_params.get("status") or computed_params.getlist("status__in"):
queryset = queryset.annotate(
aggregated_status=Case(
When(fail_count__gt=0, then=Value("FAIL")),
When(pass_count__gt=0, then=Value("PASS")),
default=Value("MUTED"),
output_field=CharField(),
)
)

# Exclude fully-muted groups by default unless include_muted is set
if "include_muted" not in computed_params:
queryset = queryset.exclude(fail_count=0, pass_count=0, muted_count__gt=0)

filterset = FindingGroupAggregatedComputedFilter(
computed_params, queryset=queryset
)
if not filterset.is_valid():
raise ValidationError(filterset.errors)

return filterset.qs

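The aggregated status is derived from the counts rather than stored; a plain-Python sketch of the same precedence (FAIL beats PASS, PASS beats MUTED), mirroring the Case() expression above:

    def aggregated_status(fail_count: int, pass_count: int) -> str:
        # Mirrors the Case() annotation used for filter[status] on aggregated rows.
        if fail_count > 0:
            return "FAIL"
        if pass_count > 0:
            return "PASS"
        return "MUTED"

    assert aggregated_status(fail_count=2, pass_count=5) == "FAIL"
    assert aggregated_status(fail_count=0, pass_count=1) == "PASS"
    assert aggregated_status(fail_count=0, pass_count=0) == "MUTED"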
def _build_resource_mapping_queryset(
|
||||
self, filtered_queryset, resource_ids=None, tenant_id: str | None = None
|
||||
):
|
||||
@@ -7107,9 +7260,67 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
),
|
||||
first_seen_at=Min("finding__first_seen_at"),
|
||||
last_seen_at=Max("finding__inserted_at"),
|
||||
# Max() on muted_reason / check_metadata is safe because
|
||||
# all findings for the same resource+check share identical
|
||||
# values (mute rules and metadata are applied per-check).
|
||||
muted_reason=Max("finding__muted_reason"),
|
||||
resource_group=Max(
|
||||
KeyTextTransform("resourcegroup", "finding__check_metadata")
|
||||
),
|
||||
)
|
||||
.filter(resource_id__isnull=False)
|
||||
.order_by("resource_id")
|
||||
)
|
||||
|
||||
# Annotations needed for each sort field (lightweight versions for ordering)
|
||||
_RESOURCE_SORT_ANNOTATIONS = {
|
||||
"status_order": lambda: Max(
|
||||
Case(
|
||||
When(finding__status="FAIL", finding__muted=False, then=Value(3)),
|
||||
When(finding__status="PASS", finding__muted=False, then=Value(2)),
|
||||
default=Value(1),
|
||||
output_field=IntegerField(),
|
||||
)
|
||||
),
|
||||
"severity_order": lambda: Max(
|
||||
Case(
|
||||
*[
|
||||
When(finding__severity=severity, then=Value(order))
|
||||
for severity, order in SEVERITY_ORDER.items()
|
||||
],
|
||||
output_field=IntegerField(),
|
||||
)
|
||||
),
|
||||
"first_seen_at": lambda: Min("finding__first_seen_at"),
|
||||
"last_seen_at": lambda: Max("finding__inserted_at"),
|
||||
"resource_uid": lambda: Max("resource__uid"),
|
||||
"resource_name": lambda: Max("resource__name"),
|
||||
"resource_region": lambda: Max("resource__region"),
|
||||
"resource_service": lambda: Max("resource__service"),
|
||||
"resource_type": lambda: Max("resource__type"),
|
||||
"provider_uid": lambda: Max("resource__provider__uid"),
|
||||
"provider_alias": lambda: Max("resource__provider__alias"),
|
||||
}
|
||||
|
||||
def _build_resource_ordering_queryset(
|
||||
self, filtered_queryset, resource_ids, tenant_id, ordering
|
||||
):
|
||||
"""Build a lightweight aggregation with only the columns needed for sorting."""
|
||||
mapping_qs = self._build_resource_mapping_queryset(
|
||||
filtered_queryset, resource_ids=resource_ids, tenant_id=tenant_id
|
||||
)
|
||||
|
||||
# Collect only the annotations required by the requested ordering
|
||||
annotations = {}
|
||||
for field in ordering:
|
||||
col = field.lstrip("-")
|
||||
if col != "resource_id" and col in self._RESOURCE_SORT_ANNOTATIONS:
|
||||
annotations[col] = self._RESOURCE_SORT_ANNOTATIONS[col]()
|
||||
|
||||
return (
|
||||
mapping_qs.values("resource_id")
|
||||
.annotate(**annotations)
|
||||
.filter(resource_id__isnull=False)
|
||||
.order_by(*ordering)
|
||||
)
|
||||
|
||||
def _post_process_resources(self, resource_data):
|
||||
@@ -7142,56 +7353,183 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
),
|
||||
"first_seen_at": row["first_seen_at"],
|
||||
"last_seen_at": row["last_seen_at"],
|
||||
"muted_reason": row.get("muted_reason"),
|
||||
"resource_group": row.get("resource_group", ""),
|
||||
}
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
def _build_aggregated_queryset(self, finding_params, latest=False):
|
||||
"""Select the summary or findings path and return an aggregated queryset."""
|
||||
finding_filterset_class = (
|
||||
LatestFindingGroupFilter if latest else FindingGroupFilter
|
||||
)
|
||||
summary_filterset_class = (
|
||||
LatestFindingGroupSummaryFilter if latest else FindingGroupSummaryFilter
|
||||
)
|
||||
|
||||
if self._requires_finding_level_aggregation(finding_params, latest=latest):
|
||||
finding_queryset = self._get_finding_queryset()
|
||||
filterset = finding_filterset_class(
|
||||
finding_params, queryset=finding_queryset
|
||||
)
|
||||
if not filterset.is_valid():
|
||||
raise ValidationError(filterset.errors)
|
||||
filtered_queryset = filterset.qs
|
||||
if latest:
|
||||
filtered_queryset = self._get_latest_findings_per_provider(
|
||||
filtered_queryset
|
||||
)
|
||||
return self._aggregate_findings(filtered_queryset)
|
||||
|
||||
summary_queryset = self.get_queryset()
|
||||
filterset = summary_filterset_class(finding_params, queryset=summary_queryset)
|
||||
if not filterset.is_valid():
|
||||
raise ValidationError(filterset.errors)
|
||||
filtered_queryset = filterset.qs
|
||||
# Only include summaries from each provider's most recent date
|
||||
# (within the filtered range).
|
||||
# We use a subquery to strip the Window annotation so it does not
|
||||
# leak into the GROUP BY of _aggregate_daily_summaries.
|
||||
latest_per_provider = filtered_queryset.annotate(
|
||||
_max_provider_date=Window(
|
||||
expression=Max("inserted_at"),
|
||||
partition_by=[F("provider_id")],
|
||||
),
|
||||
).filter(inserted_at=F("_max_provider_date"))
|
||||
clean_queryset = FindingGroupDailySummary.objects.filter(
|
||||
pk__in=latest_per_provider.values("pk")
|
||||
)
|
||||
return self._aggregate_daily_summaries(clean_queryset)
|
||||
|
||||
def _sorted_paginated_response(
|
||||
self,
|
||||
request,
|
||||
aggregated_queryset,
|
||||
):
|
||||
"""Apply ordering, pagination, post-processing, and return the Response."""
|
||||
sort_param = request.query_params.get("sort")
|
||||
if sort_param:
|
||||
ordering = self._validate_sort_fields(
|
||||
sort_param, self._FINDING_GROUP_SORT_MAP
|
||||
)
|
||||
if ordering:
|
||||
aggregated_queryset = aggregated_queryset.order_by(*ordering)
|
||||
else:
|
||||
aggregated_queryset = aggregated_queryset.order_by(
|
||||
"-fail_count", "-severity_order", "check_id"
|
||||
)
|
||||
|
||||
page = self.paginate_queryset(aggregated_queryset)
|
||||
if page is not None:
|
||||
processed_data = self._post_process_aggregation(page)
|
||||
serializer = self.get_serializer(processed_data, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
|
||||
processed_data = self._post_process_aggregation(aggregated_queryset)
|
||||
serializer = self.get_serializer(processed_data, many=True)
|
||||
return Response(serializer.data)
|
||||
|
||||
def _validate_resource_sort(self, request):
|
||||
"""Validate the sort parameter for resource endpoints (raises 400 if invalid)."""
|
||||
sort_param = request.query_params.get("sort")
|
||||
if sort_param:
|
||||
self._validate_sort_fields(sort_param, self._RESOURCE_SORT_MAP)
|
||||
|
||||
def _paginated_resource_response(
|
||||
self, request, filtered_queryset, resource_ids, tenant_id
|
||||
):
|
||||
"""Paginate and return resources.
|
||||
|
||||
Without sort: paginate lightweight resource IDs first, aggregate only the page.
|
||||
With sort: build a lightweight ordering subquery (resource_id + sort keys),
|
||||
paginate that, then aggregate full details only for the page.
|
||||
"""
|
||||
sort_param = request.query_params.get("sort")
|
||||
|
||||
if sort_param:
|
||||
ordering = self._validate_sort_fields(sort_param, self._RESOURCE_SORT_MAP)
|
||||
if ordering:
|
||||
if "resource_id" not in {field.lstrip("-") for field in ordering}:
|
||||
ordering.append("resource_id")
|
||||
|
||||
# Phase 1: lightweight aggregation with only sort keys, paginate
|
||||
ordering_qs = self._build_resource_ordering_queryset(
|
||||
filtered_queryset,
|
||||
resource_ids=resource_ids,
|
||||
tenant_id=tenant_id,
|
||||
ordering=ordering,
|
||||
)
|
||||
page = self.paginate_queryset(ordering_qs)
|
||||
if page is not None:
|
||||
page_ids = [row["resource_id"] for row in page]
|
||||
resource_data = self._build_resource_aggregation(
|
||||
filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
|
||||
)
|
||||
# Re-sort to match the page ordering
|
||||
id_order = {rid: idx for idx, rid in enumerate(page_ids)}
|
||||
results = self._post_process_resources(resource_data)
|
||||
results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
|
||||
serializer = FindingGroupResourceSerializer(results, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
|
||||
page_ids = [row["resource_id"] for row in ordering_qs]
|
||||
resource_data = self._build_resource_aggregation(
|
||||
filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
|
||||
)
|
||||
id_order = {rid: idx for idx, rid in enumerate(page_ids)}
|
||||
results = self._post_process_resources(resource_data)
|
||||
results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
|
||||
serializer = FindingGroupResourceSerializer(results, many=True)
|
||||
return Response(serializer.data)
|
||||
|
||||
# No sort (or only empty sort fragments): paginate lightweight resource IDs
|
||||
# first, aggregate only the page.
|
||||
mapping_qs = self._build_resource_mapping_queryset(
|
||||
filtered_queryset, resource_ids=resource_ids, tenant_id=tenant_id
|
||||
)
|
||||
resource_id_qs = (
|
||||
mapping_qs.values_list("resource_id", flat=True)
|
||||
.distinct()
|
||||
.order_by("resource_id")
|
||||
)
|
||||
|
||||
page_ids = self.paginate_queryset(resource_id_qs)
|
||||
if page_ids is not None:
|
||||
resource_data = self._build_resource_aggregation(
|
||||
filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
|
||||
)
|
||||
id_order = {rid: idx for idx, rid in enumerate(page_ids)}
|
||||
results = self._post_process_resources(resource_data)
|
||||
results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
|
||||
serializer = FindingGroupResourceSerializer(results, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
|
||||
resource_data = self._build_resource_aggregation(
|
||||
filtered_queryset, resource_ids=resource_ids, tenant_id=tenant_id
|
||||
).order_by("resource_id")
|
||||
results = self._post_process_resources(resource_data)
|
||||
serializer = FindingGroupResourceSerializer(results, many=True)
|
||||
return Response(serializer.data)
|
||||
|
||||
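The two-phase pattern above (paginate a lightweight ordered id list, hydrate only the page, then restore the page order via id_order) is worth seeing in isolation; a minimal sketch with plain lists:

    # Sketch of the "paginate ids first, hydrate later, re-sort to page order" idiom.
    page_ids = ["r3", "r1", "r7"]  # order decided by the lightweight sort query
    hydrated = [
        {"resource_id": "r1"},
        {"resource_id": "r7"},
        {"resource_id": "r3"},
    ]  # heavy aggregation may come back in arbitrary order

    id_order = {rid: idx for idx, rid in enumerate(page_ids)}
    hydrated.sort(key=lambda row: id_order.get(row["resource_id"], 0))
    assert [row["resource_id"] for row in hydrated] == page_ids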
def list(self, request, *args, **kwargs):
|
||||
"""
|
||||
List finding groups with aggregation and filtering.
|
||||
|
||||
Returns findings grouped by check_id with aggregated metrics.
|
||||
Requires at least one date filter for performance.
|
||||
Uses pre-aggregated daily summaries for efficient queries.
|
||||
Uses summaries when possible and raw findings for finding-level filters.
|
||||
"""
|
||||
queryset = self.get_queryset()
|
||||
|
||||
# Apply filters
|
||||
normalized_params = self._normalize_jsonapi_params(request.query_params)
|
||||
filterset = self.filterset_class(normalized_params, queryset=queryset)
|
||||
if not filterset.is_valid():
|
||||
raise ValidationError(filterset.errors)
|
||||
filtered_queryset = filterset.qs
|
||||
|
||||
# Re-aggregate daily summaries across the date range
|
||||
aggregated_queryset = self._aggregate_daily_summaries(filtered_queryset)
|
||||
|
||||
# Apply ordering (respect JSON:API sort param or use default)
|
||||
sort_param = request.query_params.get("sort")
|
||||
if sort_param:
|
||||
# Convert JSON:API sort notation (prefix '-' for descending)
|
||||
ordering = self._validate_sort_fields(sort_param)
|
||||
if ordering:
|
||||
aggregated_queryset = aggregated_queryset.order_by(*ordering)
|
||||
else:
|
||||
# Default ordering: failures first, then severity, then check_id
|
||||
aggregated_queryset = aggregated_queryset.order_by(
|
||||
"-fail_count", "-severity_order", "check_id"
|
||||
)
|
||||
|
||||
# Paginate
|
||||
page = self.paginate_queryset(aggregated_queryset)
|
||||
if page is not None:
|
||||
# Post-process the page
|
||||
processed_data = self._post_process_aggregation(page)
|
||||
serializer = self.get_serializer(processed_data, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
|
||||
# Post-process all results (no pagination)
|
||||
processed_data = self._post_process_aggregation(aggregated_queryset)
|
||||
serializer = self.get_serializer(processed_data, many=True)
|
||||
return Response(serializer.data)
|
||||
finding_params, computed_params = self._split_computed_aggregate_filters(
|
||||
normalized_params
|
||||
)
|
||||
aggregated_qs = self._build_aggregated_queryset(finding_params, latest=False)
|
||||
aggregated_qs = self._apply_aggregated_computed_filters(
|
||||
aggregated_qs, computed_params
|
||||
)
|
||||
return self._sorted_paginated_response(request, aggregated_qs)
|
||||
|
||||
@extend_schema(
|
||||
summary="List latest finding groups",
|
||||
@@ -7209,58 +7547,22 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
"""
|
||||
List the latest finding group state per check_id.
|
||||
|
||||
Returns findings grouped by check_id using the latest available
|
||||
inserted_at date per check_id, without requiring date filters.
|
||||
Returns findings grouped by check_id using latest data per
|
||||
(check_id, provider), without requiring date filters.
|
||||
"""
|
||||
queryset = self.get_queryset()
|
||||
|
||||
# Apply other filters (provider_id, provider_type, check_id, etc.)
|
||||
normalized_params = self._normalize_jsonapi_params(request.query_params)
|
||||
# Remove date filters since we're using latest
|
||||
for key in list(normalized_params.keys()):
|
||||
if key.startswith("inserted_at"):
|
||||
del normalized_params[key]
|
||||
|
||||
filterset_class = self.get_filterset_class()
|
||||
filterset = filterset_class(normalized_params, queryset=queryset)
|
||||
if not filterset.is_valid():
|
||||
raise ValidationError(filterset.errors)
|
||||
filtered_queryset = filterset.qs
|
||||
|
||||
# Keep only the latest row per (check_id, provider), then aggregate by check_id.
|
||||
latest_per_check_ids = (
|
||||
filtered_queryset.order_by("check_id", "provider_id", "-inserted_at")
|
||||
.distinct("check_id", "provider_id")
|
||||
.values("id")
|
||||
finding_params, computed_params = self._split_computed_aggregate_filters(
|
||||
normalized_params
|
||||
)
|
||||
latest_per_check = filtered_queryset.filter(
|
||||
id__in=Subquery(latest_per_check_ids)
|
||||
aggregated_qs = self._build_aggregated_queryset(finding_params, latest=True)
|
||||
aggregated_qs = self._apply_aggregated_computed_filters(
|
||||
aggregated_qs, computed_params
|
||||
)
|
||||
|
||||
# Re-aggregate daily summaries
|
||||
aggregated_queryset = self._aggregate_daily_summaries(latest_per_check)
|
||||
|
||||
# Apply ordering
|
||||
sort_param = request.query_params.get("sort")
|
||||
if sort_param:
|
||||
ordering = self._validate_sort_fields(sort_param)
|
||||
if ordering:
|
||||
aggregated_queryset = aggregated_queryset.order_by(*ordering)
|
||||
else:
|
||||
aggregated_queryset = aggregated_queryset.order_by(
|
||||
"-fail_count", "-severity_order", "check_id"
|
||||
)
|
||||
|
||||
# Paginate
|
||||
page = self.paginate_queryset(aggregated_queryset)
|
||||
if page is not None:
|
||||
processed_data = self._post_process_aggregation(page)
|
||||
serializer = self.get_serializer(processed_data, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
|
||||
processed_data = self._post_process_aggregation(aggregated_queryset)
|
||||
serializer = self.get_serializer(processed_data, many=True)
|
||||
return Response(serializer.data)
|
||||
return self._sorted_paginated_response(request, aggregated_qs)
|
||||
|
||||
@extend_schema(
|
||||
summary="List resources for a finding group",
|
||||
@@ -7284,57 +7586,33 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
check_id = pk
|
||||
queryset = self._get_finding_queryset()
|
||||
|
||||
# Apply date filters from request to Finding queryset
|
||||
# 1. Normalize and split params
|
||||
normalized_params = self._normalize_jsonapi_params(request.query_params)
|
||||
finding_params, resource_params = self._split_resource_filters(
|
||||
normalized_params
|
||||
)
|
||||
|
||||
# 2. Validate all inputs before any DB existence check
|
||||
filterset = FindingGroupFilter(finding_params, queryset=queryset)
|
||||
if not filterset.is_valid():
|
||||
raise ValidationError(filterset.errors)
|
||||
# Access .qs to trigger filter_queryset validation (e.g. required date filters)
|
||||
filtered_queryset = filterset.qs
|
||||
|
||||
# Filter by check_id
|
||||
filtered_queryset = filtered_queryset.filter(check_id=check_id)
|
||||
|
||||
# Check if any findings exist for this check_id
|
||||
if not filtered_queryset.exists():
|
||||
raise NotFound(f"Finding group '{check_id}' not found.")
|
||||
|
||||
resource_ids = self._resource_ids_from_params(
|
||||
resource_params, request.tenant_id
|
||||
)
|
||||
mapping_queryset = self._build_resource_mapping_queryset(
|
||||
filtered_queryset,
|
||||
resource_ids=resource_ids,
|
||||
tenant_id=request.tenant_id,
|
||||
)
|
||||
resource_id_queryset = (
|
||||
mapping_queryset.values_list("resource_id", flat=True)
|
||||
.distinct()
|
||||
.order_by("resource_id")
|
||||
)
|
||||
self._validate_resource_sort(request)
|
||||
|
||||
page_ids = self.paginate_queryset(resource_id_queryset)
|
||||
if page_ids is not None:
|
||||
resource_data = self._build_resource_aggregation(
|
||||
filtered_queryset,
|
||||
resource_ids=page_ids,
|
||||
tenant_id=request.tenant_id,
|
||||
)
|
||||
results = self._post_process_resources(resource_data)
|
||||
serializer = FindingGroupResourceSerializer(results, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
# 3. Check if the finding group exists (scoped to tenant/RBAC, ignoring user filters)
|
||||
if not queryset.filter(check_id=check_id).exists():
|
||||
raise NotFound(f"Finding group '{check_id}' not found.")
|
||||
|
||||
resource_data = self._build_resource_aggregation(
|
||||
filtered_queryset,
|
||||
resource_ids=resource_ids,
|
||||
tenant_id=request.tenant_id,
|
||||
# 4. Narrow to check_id
|
||||
filtered_queryset = filtered_queryset.filter(check_id=check_id)
|
||||
|
||||
return self._paginated_resource_response(
|
||||
request, filtered_queryset, resource_ids, request.tenant_id
|
||||
)
|
||||
results = self._post_process_resources(resource_data)
|
||||
serializer = FindingGroupResourceSerializer(results, many=True)
|
||||
return Response(serializer.data)
|
||||
|
||||
@extend_schema(
|
||||
summary="List resources for a finding group from latest scans",
|
||||
@@ -7378,55 +7656,31 @@ class FindingGroupViewSet(BaseRLSViewSet):
|
||||
if key.startswith("inserted_at"):
|
||||
del normalized_params[key]
|
||||
|
||||
# 1. Normalize and split params
|
||||
finding_params, resource_params = self._split_resource_filters(
|
||||
normalized_params
|
||||
)
|
||||
|
||||
# 2. Validate all inputs before any DB existence check
|
||||
filterset = LatestFindingGroupFilter(finding_params, queryset=queryset)
|
||||
if not filterset.is_valid():
|
||||
raise ValidationError(filterset.errors)
|
||||
filtered_queryset = filterset.qs
|
||||
resource_ids = self._resource_ids_from_params(
|
||||
resource_params, request.tenant_id
|
||||
)
|
||||
self._validate_resource_sort(request)
|
||||
|
||||
# Filter to latest scans and check_id
|
||||
# 3. Check if the finding group exists (scoped to tenant/RBAC + latest scans)
|
||||
if not queryset.filter(scan_id__in=latest_scan_ids, check_id=check_id).exists():
|
||||
raise NotFound(f"Finding group '{check_id}' not found.")
|
||||
|
||||
# 4. Narrow to latest scans + check_id
|
||||
filtered_queryset = filtered_queryset.filter(
|
||||
scan_id__in=latest_scan_ids,
|
||||
check_id=check_id,
|
||||
)
|
||||
|
||||
# Check if any findings exist for this check_id
|
||||
if not filtered_queryset.exists():
|
||||
raise NotFound(f"Finding group '{check_id}' not found.")
|
||||
|
||||
resource_ids = self._resource_ids_from_params(
|
||||
resource_params, request.tenant_id
|
||||
return self._paginated_resource_response(
|
||||
request, filtered_queryset, resource_ids, request.tenant_id
|
||||
)
|
||||
mapping_queryset = self._build_resource_mapping_queryset(
|
||||
filtered_queryset,
|
||||
resource_ids=resource_ids,
|
||||
tenant_id=request.tenant_id,
|
||||
)
|
||||
resource_id_queryset = (
|
||||
mapping_queryset.values_list("resource_id", flat=True)
|
||||
.distinct()
|
||||
.order_by("resource_id")
|
||||
)
|
||||
|
||||
page_ids = self.paginate_queryset(resource_id_queryset)
|
||||
if page_ids is not None:
|
||||
resource_data = self._build_resource_aggregation(
|
||||
filtered_queryset,
|
||||
resource_ids=page_ids,
|
||||
tenant_id=request.tenant_id,
|
||||
)
|
||||
results = self._post_process_resources(resource_data)
|
||||
serializer = FindingGroupResourceSerializer(results, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
|
||||
resource_data = self._build_resource_aggregation(
|
||||
filtered_queryset,
|
||||
resource_ids=resource_ids,
|
||||
tenant_id=request.tenant_id,
|
||||
)
|
||||
results = self._post_process_resources(resource_data)
|
||||
serializer = FindingGroupResourceSerializer(results, many=True)
|
||||
return Response(serializer.data)
|
||||
|
||||
@@ -299,3 +299,8 @@ DJANGO_DELETION_BATCH_SIZE = env.int("DJANGO_DELETION_BATCH_SIZE", 5000)
# SAML requirement
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True

# Attack Paths
ATTACK_PATHS_SCAN_STALE_THRESHOLD_MINUTES = env.int(
"ATTACK_PATHS_SCAN_STALE_THRESHOLD_MINUTES", 2880
)  # 48h

@@ -1,10 +1,52 @@
from urllib.parse import quote

from config.env import env

_VALID_SCHEMES = {"redis", "rediss"}


def _build_celery_broker_url(
scheme: str,
username: str,
password: str,
host: str,
port: str,
db: str,
) -> str:
if scheme not in _VALID_SCHEMES:
raise ValueError(
f"Invalid VALKEY_SCHEME '{scheme}'. Must be one of: {', '.join(sorted(_VALID_SCHEMES))}"
)

encoded_username = quote(username, safe="") if username else ""
encoded_password = quote(password, safe="") if password else ""

auth = ""
if encoded_username and encoded_password:
auth = f"{encoded_username}:{encoded_password}@"
elif encoded_password:
auth = f":{encoded_password}@"
elif encoded_username:
auth = f"{encoded_username}@"

return f"{scheme}://{auth}{host}:{port}/{db}"


VALKEY_SCHEME = env("VALKEY_SCHEME", default="redis")
VALKEY_USERNAME = env("VALKEY_USERNAME", default="")
VALKEY_PASSWORD = env("VALKEY_PASSWORD", default="")
VALKEY_HOST = env("VALKEY_HOST", default="valkey")
VALKEY_PORT = env("VALKEY_PORT", default="6379")
VALKEY_DB = env("VALKEY_DB", default="0")

CELERY_BROKER_URL = f"redis://{VALKEY_HOST}:{VALKEY_PORT}/{VALKEY_DB}"
CELERY_BROKER_URL = _build_celery_broker_url(
VALKEY_SCHEME,
VALKEY_USERNAME,
VALKEY_PASSWORD,
VALKEY_HOST,
VALKEY_PORT,
VALKEY_DB,
)
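For orientation, a minimal sketch of the broker URLs this helper produces; the hosts and credentials below are illustrative placeholders, not values from the repository:

# Illustrative only — hypothetical inputs and the URL _build_celery_broker_url returns:
# _build_celery_broker_url("redis", "", "", "valkey", "6379", "0")
#   -> "redis://valkey:6379/0"
# _build_celery_broker_url("rediss", "app", "p@ss/word", "valkey.internal", "6380", "1")
#   -> "rediss://app:p%40ss%2Fword@valkey.internal:6380/1"  (credentials are percent-encoded)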
CELERY_RESULT_BACKEND = "django-db"
CELERY_TASK_TRACK_STARTED = True


@@ -1,6 +1,15 @@
import sentry_sdk
from config.env import env

_SENTRY_TAG_FIELDS = {
"prowler_provider": "provider",
"prowler_region": "region",
"prowler_service": "service",
"prowler_tenant_id": "tenant_id",
"prowler_scan_id": "scan_id",
"prowler_provider_uid": "provider_uid",
}

IGNORED_EXCEPTIONS = [
# Provider is not connected due to credentials errors
"is not connected",
@@ -80,18 +89,43 @@ IGNORED_EXCEPTIONS = [

def before_send(event, hint):
"""
before_send handles the Sentry events in order to send them or not
before_send handles the Sentry events in order to send them or not.

It also promotes prowler context fields (injected by ProwlerContextFilter)
from the LogRecord into Sentry event tags so they become searchable.
"""
# Ignore logs with the ignored_exceptions
# https://docs.python.org/3/library/logging.html#logrecord-objects
if "log_record" in hint:
log_msg = hint["log_record"].msg
log_lvl = hint["log_record"].levelno
log_record = hint["log_record"]
log_msg = log_record.getMessage()
log_lvl = log_record.levelno

# The Neo4j driver logs transient connection errors (defunct
# connections, resets) at ERROR level via the `neo4j.io` logger.
# `RetryableSession` handles these with retries. If all retries
# are exhausted, the exception propagates and Sentry captures
# it as a normal exception event.
if (
getattr(log_record, "name", "").startswith("neo4j.io")
and "defunct" in log_msg
):
return None

# Handle Error and Critical events and discard the rest
if log_lvl <= 40 and any(ignored in log_msg for ignored in IGNORED_EXCEPTIONS):
return None  # Explicitly return None to drop the event

# Promote prowler context fields to Sentry tags
for record_attr, tag_name in _SENTRY_TAG_FIELDS.items():
value = getattr(log_record, record_attr, None)
if value:
event.setdefault("tags", {})
if isinstance(event["tags"], dict):
event["tags"][tag_name] = str(value)
elif isinstance(event["tags"], list):
event["tags"].append([tag_name, str(value)])

# Ignore exceptions with the ignored_exceptions
if "exc_info" in hint and hint["exc_info"]:
exc_value = str(hint["exc_info"][1])

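As a hedged sketch of how a before_send hook like this is usually wired up (the DSN below is a placeholder and this is not necessarily the project's exact Sentry initialization):

# Minimal sketch, assuming a standard sentry_sdk setup:
# sentry_sdk.init(
#     dsn="https://<public-key>@<host>/<project-id>",  # placeholder DSN
#     before_send=before_send,  # drops ignored events and promotes prowler_* tags
# )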
@@ -111,8 +111,9 @@ def disable_logging():
logging.disable(logging.CRITICAL)


@pytest.fixture(scope="session", autouse=True)
def create_test_user(django_db_setup, django_db_blocker):
@pytest.fixture(scope="session")
def _session_test_user(django_db_setup, django_db_blocker):
"""Create the test user once per session. Internal; use create_test_user instead."""
with django_db_blocker.unblock():
user = User.objects.create_user(
name="testing",
@@ -122,6 +123,21 @@ def create_test_user(django_db_setup, django_db_blocker):
return user


@pytest.fixture(autouse=True)
def create_test_user(_session_test_user, django_db_blocker):
"""Re-create the session-scoped test user when a TransactionTestCase
has truncated the users table."""
with django_db_blocker.unblock():
if not User.objects.filter(pk=_session_test_user.pk).exists():
User.objects.create_user(
id=_session_test_user.pk,
name="testing",
email=TEST_USER,
password=TEST_PASSWORD,
)
return _session_test_user


@pytest.fixture(scope="function")
def create_test_user_rbac(django_db_setup, django_db_blocker, tenants_fixture):
with django_db_blocker.unblock():
@@ -2012,6 +2028,7 @@ def finding_groups_fixture(
|
||||
"CheckId": "s3_bucket_public_access",
|
||||
"checktitle": "Ensure S3 buckets do not allow public access",
|
||||
"Description": "S3 buckets should be configured to restrict public access.",
|
||||
"resourcegroup": "storage",
|
||||
},
|
||||
first_seen_at="2024-01-02T00:00:00Z",
|
||||
muted=False,
|
||||
@@ -2036,6 +2053,7 @@ def finding_groups_fixture(
|
||||
"CheckId": "s3_bucket_public_access",
|
||||
"checktitle": "Ensure S3 buckets do not allow public access",
|
||||
"Description": "S3 buckets should be configured to restrict public access.",
|
||||
"resourcegroup": "storage",
|
||||
},
|
||||
first_seen_at="2024-01-03T00:00:00Z",
|
||||
muted=False,
|
||||
@@ -2234,6 +2252,89 @@ def finding_groups_fixture(
|
||||
return findings
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def finding_groups_title_variants_fixture(
|
||||
tenants_fixture, providers_fixture, scans_fixture, resources_fixture
|
||||
):
|
||||
"""
|
||||
Two providers report the same check_id with different checktitle values.
|
||||
|
||||
Simulates a Prowler version upgrade where the check title changed but the
|
||||
check_id stayed the same. Used to verify that check_title__icontains
|
||||
resolves to check_id first, so results include all providers regardless
|
||||
of which title variant matches the search term.
|
||||
"""
|
||||
tenant = tenants_fixture[0]
|
||||
provider1, provider2, *_ = providers_fixture
|
||||
scan1, scan2, *_ = scans_fixture
|
||||
resource1, resource2, *_ = resources_fixture
|
||||
|
||||
findings = []
|
||||
|
||||
# Provider 1 — OLD title variant
|
||||
finding_old = Finding.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
uid="fg_title_variant_old",
|
||||
scan=scan1,
|
||||
delta="new",
|
||||
status=Status.FAIL,
|
||||
status_extended="Secret scanning not enabled",
|
||||
impact=Severity.high,
|
||||
impact_extended="High risk",
|
||||
severity=Severity.high,
|
||||
raw_result={"status": Status.FAIL, "severity": Severity.high},
|
||||
tags={},
|
||||
check_id="github_secret_scanning_enabled",
|
||||
check_metadata={
|
||||
"CheckId": "github_secret_scanning_enabled",
|
||||
"checktitle": "Ensure repository has secret scanning enabled",
|
||||
"Description": "Checks if secret scanning is enabled.",
|
||||
},
|
||||
first_seen_at="2024-01-01T00:00:00Z",
|
||||
muted=False,
|
||||
)
|
||||
finding_old.add_resources([resource1])
|
||||
findings.append(finding_old)
|
||||
|
||||
# Provider 2 — NEW title variant (same check_id, different checktitle)
|
||||
finding_new = Finding.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
uid="fg_title_variant_new",
|
||||
scan=scan2,
|
||||
delta="new",
|
||||
status=Status.FAIL,
|
||||
status_extended="Secret scanning not enabled on repo",
|
||||
impact=Severity.high,
|
||||
impact_extended="High risk",
|
||||
severity=Severity.high,
|
||||
raw_result={"status": Status.FAIL, "severity": Severity.high},
|
||||
tags={},
|
||||
check_id="github_secret_scanning_enabled",
|
||||
check_metadata={
|
||||
"CheckId": "github_secret_scanning_enabled",
|
||||
"checktitle": "Check if secret scanning is enabled in GitHub",
|
||||
"Description": "Checks if secret scanning is enabled.",
|
||||
},
|
||||
first_seen_at="2024-01-02T00:00:00Z",
|
||||
muted=False,
|
||||
)
|
||||
finding_new.add_resources([resource2])
|
||||
findings.append(finding_new)
|
||||
|
||||
from tasks.jobs.scan import aggregate_finding_group_summaries
|
||||
|
||||
aggregate_finding_group_summaries(
|
||||
tenant_id=str(tenant.id),
|
||||
scan_id=str(scan1.id),
|
||||
)
|
||||
aggregate_finding_group_summaries(
|
||||
tenant_id=str(tenant.id),
|
||||
scan_id=str(scan2.id),
|
||||
)
|
||||
|
||||
return findings
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(items):
|
||||
"""Ensure test_rbac.py is executed first."""
|
||||
items.sort(key=lambda item: 0 if "test_rbac.py" in item.nodeid else 1)
|
||||
|
||||
@@ -0,0 +1,152 @@
from datetime import datetime, timedelta, timezone

from celery import current_app, states
from celery.utils.log import get_task_logger
from config.django.base import ATTACK_PATHS_SCAN_STALE_THRESHOLD_MINUTES
from tasks.jobs.attack_paths.db_utils import (
_mark_scan_finished,
recover_graph_data_ready,
)

from api.attack_paths import database as graph_database
from api.db_router import MainRouter
from api.db_utils import rls_transaction
from api.models import AttackPathsScan, StateChoices

logger = get_task_logger(__name__)


def cleanup_stale_attack_paths_scans() -> dict:
"""
Find `EXECUTING` `AttackPathsScan` scans whose workers are dead or that have
exceeded the stale threshold, and mark them as `FAILED`.

Two-pass detection:
1. If `TaskResult.worker` exists, ping the worker.
- Dead worker: cleanup immediately (any age).
- Alive + past threshold: revoke the task, then cleanup.
- Alive + within threshold: skip.
2. If no worker field: fall back to time-based heuristic only.
"""
threshold = timedelta(minutes=ATTACK_PATHS_SCAN_STALE_THRESHOLD_MINUTES)
now = datetime.now(tz=timezone.utc)
cutoff = now - threshold

executing_scans = (
AttackPathsScan.all_objects.using(MainRouter.admin_db)
.filter(state=StateChoices.EXECUTING)
.select_related("task__task_runner_task")
)

# Cache worker liveness so each worker is pinged at most once
executing_scans = list(executing_scans)
workers = {
tr.worker
for scan in executing_scans
if (tr := getattr(scan.task, "task_runner_task", None) if scan.task else None)
and tr.worker
}
worker_alive = {w: _is_worker_alive(w) for w in workers}

cleaned_up = []

for scan in executing_scans:
task_result = (
getattr(scan.task, "task_runner_task", None) if scan.task else None
)
worker = task_result.worker if task_result else None

if worker:
alive = worker_alive.get(worker, True)

if alive:
if scan.started_at and scan.started_at >= cutoff:
continue

# Alive but stale — revoke before cleanup
_revoke_task(task_result)
reason = (
"Scan exceeded stale threshold — " "cleaned up by periodic task"
)
else:
reason = "Worker dead — cleaned up by periodic task"
else:
# No worker recorded — time-based heuristic only
if scan.started_at and scan.started_at >= cutoff:
continue
reason = (
"No worker recorded, scan exceeded stale threshold — "
"cleaned up by periodic task"
)

if _cleanup_scan(scan, task_result, reason):
cleaned_up.append(str(scan.id))

logger.info(
f"Stale `AttackPathsScan` cleanup: {len(cleaned_up)} scan(s) cleaned up"
)
return {"cleaned_up_count": len(cleaned_up), "scan_ids": cleaned_up}

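For illustration only, a hypothetical caller reading the summary dict this function returns:

# result = cleanup_stale_attack_paths_scans()
# result == {"cleaned_up_count": 2, "scan_ids": ["<scan-uuid>", "<scan-uuid>"]}  # example shape
# if result["cleaned_up_count"]:
#     logger.warning("Cleaned up %d stale AttackPathsScan(s)", result["cleaned_up_count"])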
def _is_worker_alive(worker: str) -> bool:
"""Ping a specific Celery worker. Returns `True` if it responds or on error."""
try:
response = current_app.control.inspect(destination=[worker], timeout=1.0).ping()
return response is not None and worker in response
except Exception:
logger.exception(f"Failed to ping worker {worker}, treating as alive")
return True


def _revoke_task(task_result) -> None:
"""Send `SIGTERM` to a hung Celery task. Non-fatal on failure."""
try:
current_app.control.revoke(
task_result.task_id, terminate=True, signal="SIGTERM"
)
logger.info(f"Revoked task {task_result.task_id}")
except Exception:
logger.exception(f"Failed to revoke task {task_result.task_id}")


def _cleanup_scan(scan, task_result, reason: str) -> bool:
"""
Clean up a single stale `AttackPathsScan`:
drop temp DB, mark `FAILED`, update `TaskResult`, recover `graph_data_ready`.

Returns `True` if the scan was actually cleaned up, `False` if skipped.
"""
scan_id_str = str(scan.id)

# 1. Drop temp Neo4j database
tmp_db_name = graph_database.get_database_name(scan.id, temporary=True)
try:
graph_database.drop_database(tmp_db_name)
except Exception:
logger.exception(f"Failed to drop temp database {tmp_db_name}")

# 2. Lock row, verify still EXECUTING, mark FAILED — all atomic
with rls_transaction(str(scan.tenant_id)):
try:
fresh_scan = AttackPathsScan.objects.select_for_update().get(id=scan.id)
except AttackPathsScan.DoesNotExist:
logger.warning(f"Scan {scan_id_str} no longer exists, skipping")
return False

if fresh_scan.state != StateChoices.EXECUTING:
logger.info(f"Scan {scan_id_str} is now {fresh_scan.state}, skipping")
return False

_mark_scan_finished(fresh_scan, StateChoices.FAILED, {"global_error": reason})

# 3. Mark `TaskResult` as `FAILURE` (not RLS-protected, outside lock)
if task_result:
task_result.status = states.FAILURE
task_result.date_done = datetime.now(tz=timezone.utc)
task_result.save(update_fields=["status", "date_done"])

# 4. Recover graph_data_ready if provider data still exists
recover_graph_data_ready(fresh_scan)

logger.info(f"Cleaned up stale scan {scan_id_str}: {reason}")
return True
@@ -63,11 +63,9 @@ INTERNAL_LABELS: list[str] = [
]

# Provider isolation properties
PROVIDER_ID_PROPERTY = "_provider_id"
PROVIDER_ELEMENT_ID_PROPERTY = "_provider_element_id"

PROVIDER_ISOLATION_PROPERTIES: list[str] = [
PROVIDER_ID_PROPERTY,
PROVIDER_ELEMENT_ID_PROPERTY,
]


@@ -88,34 +88,41 @@ def starting_attack_paths_scan(
)


def _mark_scan_finished(
attack_paths_scan: ProwlerAPIAttackPathsScan,
state: StateChoices,
ingestion_exceptions: dict[str, Any],
) -> None:
"""Set terminal fields on a scan. Caller must be inside a transaction."""
now = datetime.now(tz=timezone.utc)
duration = (
int((now - attack_paths_scan.started_at).total_seconds())
if attack_paths_scan.started_at
else 0
)
attack_paths_scan.state = state
attack_paths_scan.progress = 100
attack_paths_scan.completed_at = now
attack_paths_scan.duration = duration
attack_paths_scan.ingestion_exceptions = ingestion_exceptions
attack_paths_scan.save(
update_fields=[
"state",
"progress",
"completed_at",
"duration",
"ingestion_exceptions",
]
)


def finish_attack_paths_scan(
attack_paths_scan: ProwlerAPIAttackPathsScan,
state: StateChoices,
ingestion_exceptions: dict[str, Any],
) -> None:
with rls_transaction(attack_paths_scan.tenant_id):
now = datetime.now(tz=timezone.utc)
duration = (
int((now - attack_paths_scan.started_at).total_seconds())
if attack_paths_scan.started_at
else 0
)

attack_paths_scan.state = state
attack_paths_scan.progress = 100
attack_paths_scan.completed_at = now
attack_paths_scan.duration = duration
attack_paths_scan.ingestion_exceptions = ingestion_exceptions

attack_paths_scan.save(
update_fields=[
"state",
"progress",
"completed_at",
"duration",
"ingestion_exceptions",
]
)
_mark_scan_finished(attack_paths_scan, state, ingestion_exceptions)


def update_attack_paths_scan_progress(
@@ -194,25 +201,26 @@ def fail_attack_paths_scan(
Used as a safety net when the Celery task fails outside the job's own error handling.
"""
attack_paths_scan = retrieve_attack_paths_scan(tenant_id, scan_id)
if attack_paths_scan and attack_paths_scan.state not in (
StateChoices.COMPLETED,
StateChoices.FAILED,
):
tmp_db_name = graph_database.get_database_name(
attack_paths_scan.id, temporary=True
if not attack_paths_scan:
return

tmp_db_name = graph_database.get_database_name(attack_paths_scan.id, temporary=True)
try:
graph_database.drop_database(tmp_db_name)
except Exception:
logger.exception(
f"Failed to drop temp database {tmp_db_name} during failure handling"
)

with rls_transaction(tenant_id):
try:
graph_database.drop_database(tmp_db_name)

except Exception:
logger.exception(
f"Failed to drop temp database {tmp_db_name} during failure handling"
fresh = ProwlerAPIAttackPathsScan.objects.select_for_update().get(
id=attack_paths_scan.id
)
except ProwlerAPIAttackPathsScan.DoesNotExist:
return
if fresh.state in (StateChoices.COMPLETED, StateChoices.FAILED):
return
_mark_scan_finished(fresh, StateChoices.FAILED, {"global_error": error})

finish_attack_paths_scan(
attack_paths_scan,
StateChoices.FAILED,
{"global_error": error},
)

recover_graph_data_ready(attack_paths_scan)
recover_graph_data_ready(fresh)

@@ -22,7 +22,6 @@ from tasks.jobs.attack_paths.config import (
|
||||
get_provider_resource_label,
|
||||
get_root_node_label,
|
||||
)
|
||||
from tasks.jobs.attack_paths.indexes import IndexType, create_indexes
|
||||
from tasks.jobs.attack_paths.queries import (
|
||||
ADD_RESOURCE_LABEL_TEMPLATE,
|
||||
CLEANUP_FINDINGS_TEMPLATE,
|
||||
@@ -84,11 +83,6 @@ def _to_neo4j_dict(record: dict[str, Any], resource_uid: str) -> dict[str, Any]:
|
||||
# ----------
|
||||
|
||||
|
||||
def create_findings_indexes(neo4j_session: neo4j.Session) -> None:
|
||||
"""Create indexes for Prowler findings and resource lookups."""
|
||||
create_indexes(neo4j_session, IndexType.FINDINGS)
|
||||
|
||||
|
||||
def analysis(
|
||||
neo4j_session: neo4j.Session,
|
||||
prowler_api_provider: Provider,
|
||||
@@ -196,7 +190,6 @@ def cleanup_findings(
|
||||
) -> None:
|
||||
"""Remove stale findings (classic Cartography behaviour)."""
|
||||
parameters = {
|
||||
"provider_uid": str(prowler_api_provider.uid),
|
||||
"last_updated": config.update_tag,
|
||||
"batch_size": BATCH_SIZE,
|
||||
}
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
from enum import Enum
|
||||
|
||||
import neo4j
|
||||
|
||||
from cartography.client.core.tx import run_write_query
|
||||
@@ -9,20 +7,12 @@ from tasks.jobs.attack_paths.config import (
|
||||
INTERNET_NODE_LABEL,
|
||||
PROWLER_FINDING_LABEL,
|
||||
PROVIDER_ELEMENT_ID_PROPERTY,
|
||||
PROVIDER_ID_PROPERTY,
|
||||
PROVIDER_RESOURCE_LABEL,
|
||||
)
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
|
||||
class IndexType(Enum):
|
||||
"""Types of indexes that can be created."""
|
||||
|
||||
FINDINGS = "findings"
|
||||
SYNC = "sync"
|
||||
|
||||
|
||||
# Indexes for Prowler findings and resource lookups
|
||||
FINDINGS_INDEX_STATEMENTS = [
|
||||
# Resource indexes for Prowler Finding lookups
|
||||
@@ -30,7 +20,6 @@ FINDINGS_INDEX_STATEMENTS = [
|
||||
"CREATE INDEX aws_resource_id IF NOT EXISTS FOR (n:_AWSResource) ON (n.id);",
|
||||
# Prowler Finding indexes
|
||||
f"CREATE INDEX prowler_finding_id IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.id);",
|
||||
f"CREATE INDEX prowler_finding_provider_uid IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.provider_uid);",
|
||||
f"CREATE INDEX prowler_finding_lastupdated IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.lastupdated);",
|
||||
f"CREATE INDEX prowler_finding_status IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.status);",
|
||||
# Internet node index for MERGE lookups
|
||||
@@ -40,30 +29,18 @@ FINDINGS_INDEX_STATEMENTS = [
|
||||
# Indexes for provider resource sync operations
|
||||
SYNC_INDEX_STATEMENTS = [
|
||||
f"CREATE INDEX provider_resource_element_id IF NOT EXISTS FOR (n:{PROVIDER_RESOURCE_LABEL}) ON (n.{PROVIDER_ELEMENT_ID_PROPERTY});",
|
||||
f"CREATE INDEX provider_resource_provider_id IF NOT EXISTS FOR (n:{PROVIDER_RESOURCE_LABEL}) ON (n.{PROVIDER_ID_PROPERTY});",
|
||||
]
|
||||
|
||||
|
||||
def create_indexes(neo4j_session: neo4j.Session, index_type: IndexType) -> None:
|
||||
"""
|
||||
Create indexes for the specified type.
|
||||
|
||||
Args:
|
||||
`neo4j_session`: The Neo4j session to use
|
||||
`index_type`: The type of indexes to create (FINDINGS or SYNC)
|
||||
"""
|
||||
if index_type == IndexType.FINDINGS:
|
||||
logger.info("Creating indexes for Prowler Findings node types")
|
||||
for statement in FINDINGS_INDEX_STATEMENTS:
|
||||
run_write_query(neo4j_session, statement)
|
||||
|
||||
elif index_type == IndexType.SYNC:
|
||||
logger.info("Ensuring ProviderResource indexes exist")
|
||||
for statement in SYNC_INDEX_STATEMENTS:
|
||||
neo4j_session.run(statement)
|
||||
def create_findings_indexes(neo4j_session: neo4j.Session) -> None:
|
||||
"""Create indexes for Prowler findings and resource lookups."""
|
||||
logger.info("Creating indexes for Prowler Findings node types")
|
||||
for statement in FINDINGS_INDEX_STATEMENTS:
|
||||
run_write_query(neo4j_session, statement)
|
||||
|
||||
|
||||
def create_all_indexes(neo4j_session: neo4j.Session) -> None:
|
||||
"""Create all indexes (both findings and sync)."""
|
||||
create_indexes(neo4j_session, IndexType.FINDINGS)
|
||||
create_indexes(neo4j_session, IndexType.SYNC)
|
||||
def create_sync_indexes(neo4j_session: neo4j.Session) -> None:
|
||||
"""Create indexes for provider resource sync operations."""
|
||||
logger.info("Ensuring ProviderResource indexes exist")
|
||||
for statement in SYNC_INDEX_STATEMENTS:
|
||||
neo4j_session.run(statement)
|
||||
|
||||
@@ -3,7 +3,6 @@ from tasks.jobs.attack_paths.config import (
|
||||
INTERNET_NODE_LABEL,
|
||||
PROWLER_FINDING_LABEL,
|
||||
PROVIDER_ELEMENT_ID_PROPERTY,
|
||||
PROVIDER_ID_PROPERTY,
|
||||
PROVIDER_RESOURCE_LABEL,
|
||||
)
|
||||
|
||||
@@ -62,7 +61,6 @@ INSERT_FINDING_TEMPLATE = f"""
|
||||
finding.check_title = finding_data.check_title,
|
||||
finding.muted = finding_data.muted,
|
||||
finding.muted_reason = finding_data.muted_reason,
|
||||
finding.provider_uid = $provider_uid,
|
||||
finding.firstseen = timestamp(),
|
||||
finding.lastupdated = $last_updated,
|
||||
finding._module_name = 'cartography:prowler',
|
||||
@@ -74,7 +72,6 @@ INSERT_FINDING_TEMPLATE = f"""
|
||||
|
||||
MERGE (resource)-[rel:HAS_FINDING]->(finding)
|
||||
ON CREATE SET
|
||||
rel.provider_uid = $provider_uid,
|
||||
rel.firstseen = timestamp(),
|
||||
rel.lastupdated = $last_updated,
|
||||
rel._module_name = 'cartography:prowler',
|
||||
@@ -84,7 +81,7 @@ INSERT_FINDING_TEMPLATE = f"""
|
||||
"""
|
||||
|
||||
CLEANUP_FINDINGS_TEMPLATE = f"""
|
||||
MATCH (finding:{PROWLER_FINDING_LABEL} {{provider_uid: $provider_uid}})
|
||||
MATCH (finding:{PROWLER_FINDING_LABEL})
|
||||
WHERE finding.lastupdated < $last_updated
|
||||
|
||||
WITH finding LIMIT $batch_size
|
||||
@@ -155,7 +152,6 @@ NODE_SYNC_TEMPLATE = f"""
|
||||
UNWIND $rows AS row
|
||||
MERGE (n:__NODE_LABELS__ {{{PROVIDER_ELEMENT_ID_PROPERTY}: row.provider_element_id}})
|
||||
SET n += row.props
|
||||
SET n.{PROVIDER_ID_PROPERTY} = $provider_id
|
||||
"""
|
||||
|
||||
RELATIONSHIP_SYNC_TEMPLATE = f"""
|
||||
@@ -164,5 +160,4 @@ RELATIONSHIP_SYNC_TEMPLATE = f"""
|
||||
MATCH (t:{PROVIDER_RESOURCE_LABEL} {{{PROVIDER_ELEMENT_ID_PROPERTY}: row.end_element_id}})
|
||||
MERGE (s)-[r:__REL_TYPE__ {{{PROVIDER_ELEMENT_ID_PROPERTY}: row.provider_element_id}}]->(t)
|
||||
SET r += row.props
|
||||
SET r.{PROVIDER_ID_PROPERTY} = $provider_id
|
||||
"""
|
||||
|
||||
@@ -38,12 +38,12 @@ Pipeline steps:
Stale findings from previous scans are cleaned up.

7. Sync the temp database into the tenant database:
- Drop the old provider subgraph (matched by _provider_id property).
- Drop the old provider subgraph (matched by dynamic _Provider_{uuid} label).
graph_data_ready is set to False for all scans of this provider while
the swap happens so the API doesn't serve partial data.
- Copy nodes and relationships in batches. Every synced node gets a
_ProviderResource label and _provider_id / _provider_element_id
properties for multi-provider isolation.
_ProviderResource label and dynamic _Tenant_{uuid} / _Provider_{uuid}
isolation labels, plus a _provider_element_id property for MERGE keys.
- Set graph_data_ready back to True.

8. Drop the temporary database, mark the AttackPathsScan as COMPLETED.
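The provider-subgraph drop described in step 7 above can be pictured with a rough Cypher sketch; the label value is a made-up placeholder and the real implementation lives in graph_database.drop_subgraph:

# Illustrative sketch only, not the exact query the sync code runs:
# provider_label = "_Provider_<uuid>"  # dynamic isolation label, placeholder value
# query = f"MATCH (n:`{provider_label}`) DETACH DELETE n"
# tenant_neo4j_session.run(query)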
@@ -62,7 +62,7 @@ from cartography.intel import analysis as cartography_analysis
from cartography.intel import create_indexes as cartography_create_indexes
from cartography.intel import ontology as cartography_ontology
from celery.utils.log import get_task_logger
from tasks.jobs.attack_paths import db_utils, findings, internet, sync, utils
from tasks.jobs.attack_paths import db_utils, findings, indexes, internet, sync, utils
from tasks.jobs.attack_paths.config import get_cartography_ingestion_function

from api.attack_paths import database as graph_database
@@ -165,7 +165,7 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
) as tmp_neo4j_session:
# Indexes creation
cartography_create_indexes.run(tmp_neo4j_session, tmp_cartography_config)
findings.create_findings_indexes(tmp_neo4j_session)
indexes.create_findings_indexes(tmp_neo4j_session)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 2)

# The real scan, which iterates over the cloud services
@@ -221,8 +221,8 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
cartography_create_indexes.run(
tenant_neo4j_session, tenant_cartography_config
)
findings.create_findings_indexes(tenant_neo4j_session)
sync.create_sync_indexes(tenant_neo4j_session)
indexes.create_findings_indexes(tenant_neo4j_session)
indexes.create_sync_indexes(tenant_neo4j_session)

logger.info(f"Deleting existing provider graph in {tenant_database_name}")
db_utils.set_provider_graph_data_ready(attack_paths_scan, False)

@@ -10,8 +10,6 @@ from typing import Any
|
||||
|
||||
import neo4j
|
||||
from celery.utils.log import get_task_logger
|
||||
|
||||
from api.attack_paths import database as graph_database
|
||||
from tasks.jobs.attack_paths.config import (
|
||||
PROVIDER_ISOLATION_PROPERTIES,
|
||||
PROVIDER_RESOURCE_LABEL,
|
||||
@@ -19,7 +17,6 @@ from tasks.jobs.attack_paths.config import (
|
||||
get_provider_label,
|
||||
get_tenant_label,
|
||||
)
|
||||
from tasks.jobs.attack_paths.indexes import IndexType, create_indexes
|
||||
from tasks.jobs.attack_paths.queries import (
|
||||
NODE_FETCH_QUERY,
|
||||
NODE_SYNC_TEMPLATE,
|
||||
@@ -28,14 +25,11 @@ from tasks.jobs.attack_paths.queries import (
|
||||
render_cypher_template,
|
||||
)
|
||||
|
||||
from api.attack_paths import database as graph_database
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
|
||||
def create_sync_indexes(neo4j_session) -> None:
|
||||
"""Create indexes for provider resource sync operations."""
|
||||
create_indexes(neo4j_session, IndexType.SYNC)
|
||||
|
||||
|
||||
def sync_graph(
|
||||
source_database: str,
|
||||
target_database: str,
|
||||
@@ -81,8 +75,8 @@ def sync_nodes(
|
||||
"""
|
||||
Sync nodes from source to target database.
|
||||
|
||||
Adds `_ProviderResource` label and `_provider_id` property to all nodes.
|
||||
Also adds dynamic `_Tenant_{id}` and `_Provider_{id}` isolation labels.
|
||||
Adds `_ProviderResource` label and dynamic `_Tenant_{id}` and `_Provider_{id}`
|
||||
isolation labels to all nodes.
|
||||
|
||||
Source and target sessions are opened sequentially per batch to avoid
|
||||
holding two Bolt connections simultaneously for the entire sync duration.
|
||||
@@ -119,13 +113,7 @@ def sync_nodes(
|
||||
query = render_cypher_template(
|
||||
NODE_SYNC_TEMPLATE, {"__NODE_LABELS__": node_labels}
|
||||
)
|
||||
target_session.run(
|
||||
query,
|
||||
{
|
||||
"rows": batch,
|
||||
"provider_id": provider_id,
|
||||
},
|
||||
)
|
||||
target_session.run(query, {"rows": batch})
|
||||
|
||||
total_synced += batch_count
|
||||
logger.info(
|
||||
@@ -143,7 +131,7 @@ def sync_relationships(
|
||||
"""
|
||||
Sync relationships from source to target database.
|
||||
|
||||
Adds `_provider_id` property to all relationships.
|
||||
Matches source and target nodes by `_provider_element_id` in the tenant database.
|
||||
|
||||
Source and target sessions are opened sequentially per batch to avoid
|
||||
holding two Bolt connections simultaneously for the entire sync duration.
|
||||
@@ -174,13 +162,7 @@ def sync_relationships(
|
||||
query = render_cypher_template(
|
||||
RELATIONSHIP_SYNC_TEMPLATE, {"__REL_TYPE__": rel_type}
|
||||
)
|
||||
target_session.run(
|
||||
query,
|
||||
{
|
||||
"rows": batch,
|
||||
"provider_id": provider_id,
|
||||
},
|
||||
)
|
||||
target_session.run(query, {"rows": batch})
|
||||
|
||||
total_synced += batch_count
|
||||
logger.info(
|
||||
|
||||
@@ -677,6 +677,7 @@ def _process_finding_micro_batch(
|
||||
|
||||
# Create finding object (don't save yet)
|
||||
check_metadata = finding.get_metadata()
|
||||
check_metadata["compliance"] = finding.compliance
|
||||
finding_instance = Finding(
|
||||
tenant_id=tenant_id,
|
||||
uid=finding_uid,
|
||||
@@ -852,6 +853,22 @@ def perform_prowler_scan(
|
||||
scan_instance.started_at = datetime.now(tz=timezone.utc)
|
||||
scan_instance.save()
|
||||
|
||||
# Enrich Sentry context for all downstream errors (Layer 2: app-only tags)
|
||||
from prowler.lib.logger import (
|
||||
prowler_provider_uid_var,
|
||||
prowler_scan_id_var,
|
||||
prowler_tenant_id_var,
|
||||
)
|
||||
|
||||
prowler_tenant_id_var.set(str(tenant_id))
|
||||
prowler_scan_id_var.set(str(scan_id))
|
||||
prowler_provider_uid_var.set(str(provider_instance.uid))
|
||||
|
||||
sentry_sdk.set_tag("provider", str(provider_instance.provider))
|
||||
sentry_sdk.set_tag("tenant_id", str(tenant_id))
|
||||
sentry_sdk.set_tag("scan_id", str(scan_id))
|
||||
sentry_sdk.set_tag("provider_uid", str(provider_instance.uid))
|
||||
|
||||
# Find the mutelist processor if it exists
|
||||
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
|
||||
try:
|
||||
@@ -1887,7 +1904,8 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
|
||||
inserted_at=summary_timestamp,
|
||||
updated_at=updated_at,
|
||||
check_title=metadata.get("checktitle", ""),
|
||||
check_description=metadata.get("Description", ""),
|
||||
check_description=metadata.get("description", "")
|
||||
or metadata.get("Description", ""),
|
||||
severity_order=row["severity_order"] or 1,
|
||||
pass_count=row["pass_count"],
|
||||
fail_count=row["fail_count"],
|
||||
|
||||
@@ -11,8 +11,9 @@ from django_celery_beat.models import PeriodicTask
|
||||
from tasks.jobs.attack_paths import (
|
||||
attack_paths_scan,
|
||||
can_provider_run_attack_paths_scan,
|
||||
db_utils as attack_paths_db_utils,
|
||||
)
|
||||
from tasks.jobs.attack_paths import db_utils as attack_paths_db_utils
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
from tasks.jobs.backfill import (
|
||||
backfill_compliance_summaries,
|
||||
backfill_daily_severity_summaries,
|
||||
@@ -406,6 +407,11 @@ def perform_attack_paths_scan_task(self, tenant_id: str, scan_id: str):
|
||||
)
|
||||
|
||||
|
||||
@shared_task(name="attack-paths-cleanup-stale-scans", queue="attack-paths-scans")
|
||||
def cleanup_stale_attack_paths_scans_task():
|
||||
return cleanup_stale_attack_paths_scans()
|
||||
|
||||
|
||||
@shared_task(name="tenant-deletion", queue="deletion", autoretry_for=(Exception,))
|
||||
def delete_tenant_task(tenant_id: str):
|
||||
return delete_tenant(pk=tenant_id)
|
||||
@@ -760,6 +766,33 @@ def aggregate_finding_group_summaries_task(tenant_id: str, scan_id: str):
|
||||
return aggregate_finding_group_summaries(tenant_id=tenant_id, scan_id=scan_id)
|
||||
|
||||
|
||||
@shared_task(
|
||||
base=RLSTask, name="reaggregate-all-finding-group-summaries", queue="overview"
|
||||
)
|
||||
@set_tenant(keep_tenant=True)
|
||||
def reaggregate_all_finding_group_summaries_task(tenant_id: str):
|
||||
"""Reaggregate finding group summaries for all providers' latest completed scans."""
|
||||
latest_scan_ids = list(
|
||||
Scan.objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
|
||||
.order_by("provider_id", "-completed_at", "-inserted_at")
|
||||
.distinct("provider_id")
|
||||
.values_list("id", flat=True)
|
||||
)
|
||||
if latest_scan_ids:
|
||||
logger.info(
|
||||
"Reaggregating finding group summaries for %d scans: %s",
|
||||
len(latest_scan_ids),
|
||||
latest_scan_ids,
|
||||
)
|
||||
group(
|
||||
aggregate_finding_group_summaries_task.si(
|
||||
tenant_id=tenant_id, scan_id=str(scan_id)
|
||||
)
|
||||
for scan_id in latest_scan_ids
|
||||
).apply_async()
|
||||
return {"scans_reaggregated": len(latest_scan_ids)}
|
||||
|
||||
|
||||
@shared_task(base=RLSTask, name="lighthouse-connection-check")
|
||||
@set_tenant
|
||||
def check_lighthouse_connection_task(lighthouse_config_id: str, tenant_id: str = None):
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
from contextlib import nullcontext
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import MagicMock, call, patch
|
||||
|
||||
import pytest
|
||||
from django_celery_results.models import TaskResult
|
||||
from tasks.jobs.attack_paths import findings as findings_module
|
||||
from tasks.jobs.attack_paths import indexes as indexes_module
|
||||
from tasks.jobs.attack_paths import internet as internet_module
|
||||
from tasks.jobs.attack_paths import sync as sync_module
|
||||
from tasks.jobs.attack_paths.scan import run as attack_paths_run
|
||||
@@ -17,6 +20,7 @@ from api.models import (
|
||||
Scan,
|
||||
StateChoices,
|
||||
StatusChoices,
|
||||
Task,
|
||||
)
|
||||
from prowler.lib.check.models import Severity
|
||||
|
||||
@@ -36,10 +40,10 @@ class TestAttackPathsRun:
|
||||
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
|
||||
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
|
||||
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
|
||||
@patch("tasks.jobs.attack_paths.scan.sync.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
|
||||
@@ -186,7 +190,7 @@ class TestAttackPathsRun:
|
||||
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.graph_database.create_database")
|
||||
@@ -285,7 +289,7 @@ class TestAttackPathsRun:
|
||||
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.graph_database.create_database")
|
||||
@@ -388,7 +392,7 @@ class TestAttackPathsRun:
|
||||
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.graph_database.create_database")
|
||||
@@ -490,10 +494,10 @@ class TestAttackPathsRun:
|
||||
"tasks.jobs.attack_paths.scan.graph_database.drop_subgraph",
|
||||
side_effect=RuntimeError("drop failed"),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.scan.sync.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
|
||||
@@ -603,10 +607,10 @@ class TestAttackPathsRun:
|
||||
side_effect=RuntimeError("sync failed"),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
|
||||
@patch("tasks.jobs.attack_paths.scan.sync.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
|
||||
@@ -716,10 +720,10 @@ class TestAttackPathsRun:
|
||||
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
|
||||
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
|
||||
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
|
||||
@patch("tasks.jobs.attack_paths.scan.sync.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
|
||||
@@ -834,10 +838,10 @@ class TestAttackPathsRun:
|
||||
"tasks.jobs.attack_paths.scan.graph_database.drop_subgraph",
|
||||
side_effect=RuntimeError("drop failed"),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.scan.sync.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
|
||||
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
|
||||
@patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
|
||||
@@ -1003,9 +1007,6 @@ class TestFailAttackPathsScan:
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.graph_database.drop_database"
|
||||
) as mock_drop_db,
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.finish_attack_paths_scan"
|
||||
) as mock_finish,
|
||||
patch("tasks.jobs.attack_paths.db_utils.recover_graph_data_ready"),
|
||||
):
|
||||
fail_attack_paths_scan(str(tenant.id), str(scan.id), "setup exploded")
|
||||
@@ -1013,11 +1014,12 @@ class TestFailAttackPathsScan:
|
||||
mock_retrieve.assert_called_once_with(str(tenant.id), str(scan.id))
|
||||
expected_tmp_db = f"db-tmp-scan-{str(attack_paths_scan.id).lower()}"
|
||||
mock_drop_db.assert_called_once_with(expected_tmp_db)
|
||||
mock_finish.assert_called_once_with(
|
||||
attack_paths_scan,
|
||||
StateChoices.FAILED,
|
||||
{"global_error": "setup exploded"},
|
||||
)
|
||||
|
||||
attack_paths_scan.refresh_from_db()
|
||||
assert attack_paths_scan.state == StateChoices.FAILED
|
||||
assert attack_paths_scan.ingestion_exceptions == {
|
||||
"global_error": "setup exploded"
|
||||
}
|
||||
|
||||
def test_drops_temp_database_even_when_drop_fails(
|
||||
self, tenants_fixture, providers_fixture, scans_fixture
|
||||
@@ -1048,18 +1050,12 @@ class TestFailAttackPathsScan:
|
||||
"tasks.jobs.attack_paths.db_utils.graph_database.drop_database",
|
||||
side_effect=Exception("Neo4j unreachable"),
|
||||
),
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.finish_attack_paths_scan"
|
||||
) as mock_finish,
|
||||
patch("tasks.jobs.attack_paths.db_utils.recover_graph_data_ready"),
|
||||
):
|
||||
fail_attack_paths_scan(str(tenant.id), str(scan.id), "setup exploded")
|
||||
|
||||
mock_finish.assert_called_once_with(
|
||||
attack_paths_scan,
|
||||
StateChoices.FAILED,
|
||||
{"global_error": "setup exploded"},
|
||||
)
|
||||
attack_paths_scan.refresh_from_db()
|
||||
assert attack_paths_scan.state == StateChoices.FAILED
|
||||
|
||||
def test_skips_already_failed_scan(
|
||||
self, tenants_fixture, providers_fixture, scans_fixture
|
||||
@@ -1089,33 +1085,25 @@ class TestFailAttackPathsScan:
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.graph_database.drop_database"
|
||||
) as mock_drop_db,
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.finish_attack_paths_scan"
|
||||
) as mock_finish,
|
||||
):
|
||||
fail_attack_paths_scan(str(tenant.id), str(scan.id), "setup exploded")
|
||||
|
||||
mock_drop_db.assert_not_called()
|
||||
mock_finish.assert_not_called()
|
||||
mock_drop_db.assert_called_once()
|
||||
|
||||
attack_paths_scan.refresh_from_db()
|
||||
assert attack_paths_scan.state == StateChoices.FAILED
|
||||
|
||||
def test_skips_when_no_scan_found(self, tenants_fixture):
|
||||
from tasks.jobs.attack_paths.db_utils import fail_attack_paths_scan
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
|
||||
with (
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.retrieve_attack_paths_scan",
|
||||
return_value=None,
|
||||
),
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.finish_attack_paths_scan"
|
||||
) as mock_finish,
|
||||
with patch(
|
||||
"tasks.jobs.attack_paths.db_utils.retrieve_attack_paths_scan",
|
||||
return_value=None,
|
||||
):
|
||||
fail_attack_paths_scan(str(tenant.id), "nonexistent", "setup exploded")
|
||||
|
||||
mock_finish.assert_not_called()
|
||||
|
||||
def test_fail_recovers_graph_data_ready_when_data_exists(
|
||||
self, tenants_fixture, providers_fixture, scans_fixture
|
||||
):
|
||||
@@ -1142,7 +1130,6 @@ class TestFailAttackPathsScan:
|
||||
return_value=attack_paths_scan,
|
||||
),
|
||||
patch("tasks.jobs.attack_paths.db_utils.graph_database.drop_database"),
|
||||
patch("tasks.jobs.attack_paths.db_utils.finish_attack_paths_scan"),
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.graph_database.has_provider_data",
|
||||
return_value=True,
|
||||
@@ -1181,7 +1168,6 @@ class TestFailAttackPathsScan:
|
||||
return_value=attack_paths_scan,
|
||||
),
|
||||
patch("tasks.jobs.attack_paths.db_utils.graph_database.drop_database"),
|
||||
patch("tasks.jobs.attack_paths.db_utils.finish_attack_paths_scan"),
|
||||
patch(
|
||||
"tasks.jobs.attack_paths.db_utils.graph_database.has_provider_data",
|
||||
return_value=False,
|
||||
@@ -1265,7 +1251,7 @@ class TestAttackPathsFindingsHelpers:
|
||||
def test_create_findings_indexes_executes_all_statements(self):
|
||||
mock_session = MagicMock()
|
||||
with patch("tasks.jobs.attack_paths.indexes.run_write_query") as mock_run_write:
|
||||
findings_module.create_findings_indexes(mock_session)
|
||||
indexes_module.create_findings_indexes(mock_session)
|
||||
|
||||
from tasks.jobs.attack_paths.indexes import FINDINGS_INDEX_STATEMENTS
|
||||
|
||||
@@ -1327,7 +1313,6 @@ class TestAttackPathsFindingsHelpers:
|
||||
|
||||
assert mock_session.run.call_count == 2
|
||||
params = mock_session.run.call_args.args[1]
|
||||
assert params["provider_uid"] == str(provider.uid)
|
||||
assert params["last_updated"] == config.update_tag
|
||||
|
||||
def test_stream_findings_with_resources_returns_latest_scan_data(
|
||||
@@ -2317,3 +2302,374 @@ class TestAttackPathsDbUtilsGraphDataReady:
|
||||
ap_scan_b.refresh_from_db()
|
||||
assert ap_scan_a.graph_data_ready is False
|
||||
assert ap_scan_b.graph_data_ready is True
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestCleanupStaleAttackPathsScans:
|
||||
def _create_executing_scan(
|
||||
self, tenant, provider, scan=None, started_at=None, worker=None
|
||||
):
|
||||
"""Helper to create an EXECUTING AttackPathsScan with optional Task+TaskResult."""
|
||||
ap_scan = AttackPathsScan.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=provider,
|
||||
scan=scan,
|
||||
state=StateChoices.EXECUTING,
|
||||
started_at=started_at or datetime.now(tz=timezone.utc),
|
||||
)
|
||||
|
||||
task_result = None
|
||||
if worker is not None:
|
||||
task_result = TaskResult.objects.create(
|
||||
task_id=str(ap_scan.id),
|
||||
task_name="attack-paths-scan-perform",
|
||||
status="STARTED",
|
||||
worker=worker,
|
||||
)
|
||||
task = Task.objects.create(
|
||||
id=task_result.task_id,
|
||||
task_runner_task=task_result,
|
||||
tenant_id=tenant.id,
|
||||
)
|
||||
ap_scan.task = task
|
||||
ap_scan.save(update_fields=["task_id"])
|
||||
|
||||
return ap_scan, task_result
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._is_worker_alive", return_value=False)
|
||||
def test_cleans_up_scan_with_dead_worker(
|
||||
self,
|
||||
mock_alive,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
# Recent scan — should still be cleaned up because worker is dead
|
||||
ap_scan, task_result = self._create_executing_scan(
|
||||
tenant, provider, worker="dead-worker@host"
|
||||
)
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 1
|
||||
assert str(ap_scan.id) in result["scan_ids"]
|
||||
mock_drop_db.assert_called_once()
|
||||
mock_recover.assert_called_once()
|
||||
|
||||
ap_scan.refresh_from_db()
|
||||
assert ap_scan.state == StateChoices.FAILED
|
||||
assert ap_scan.progress == 100
|
||||
assert ap_scan.completed_at is not None
|
||||
assert ap_scan.ingestion_exceptions == {
|
||||
"global_error": "Worker dead — cleaned up by periodic task"
|
||||
}
|
||||
|
||||
task_result.refresh_from_db()
|
||||
assert task_result.status == "FAILURE"
|
||||
assert task_result.date_done is not None
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._revoke_task")
|
||||
@patch("tasks.jobs.attack_paths.cleanup._is_worker_alive", return_value=True)
|
||||
def test_revokes_and_cleans_scan_exceeding_threshold_on_live_worker(
|
||||
self,
|
||||
mock_alive,
|
||||
mock_revoke,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
old_start = datetime.now(tz=timezone.utc) - timedelta(hours=49)
|
||||
ap_scan, task_result = self._create_executing_scan(
|
||||
tenant, provider, started_at=old_start, worker="live-worker@host"
|
||||
)
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 1
|
||||
mock_revoke.assert_called_once_with(task_result)
|
||||
mock_recover.assert_called_once()
|
||||
|
||||
ap_scan.refresh_from_db()
|
||||
assert ap_scan.state == StateChoices.FAILED
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._is_worker_alive", return_value=True)
|
||||
def test_ignores_recent_executing_scans_on_live_worker(
|
||||
self,
|
||||
mock_alive,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
# Recent scan on live worker — should be skipped
|
||||
self._create_executing_scan(tenant, provider, worker="live-worker@host")
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 0
|
||||
mock_drop_db.assert_not_called()
|
||||
mock_recover.assert_not_called()
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
def test_ignores_completed_and_failed_scans(
|
||||
self,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
AttackPathsScan.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=provider,
|
||||
state=StateChoices.COMPLETED,
|
||||
)
|
||||
AttackPathsScan.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=provider,
|
||||
state=StateChoices.FAILED,
|
||||
)
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 0
|
||||
mock_drop_db.assert_not_called()
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.graph_database.drop_database",
|
||||
side_effect=Exception("Neo4j unreachable"),
|
||||
)
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._is_worker_alive", return_value=False)
|
||||
def test_handles_drop_database_failure_gracefully(
|
||||
self,
|
||||
mock_alive,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
self._create_executing_scan(tenant, provider, worker="dead-worker@host")
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 1
|
||||
mock_drop_db.assert_called_once()
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._is_worker_alive", return_value=False)
|
||||
def test_cross_tenant_cleanup(
|
||||
self,
|
||||
mock_alive,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant1 = tenants_fixture[0]
|
||||
tenant2 = tenants_fixture[1]
|
||||
provider1 = providers_fixture[0]
|
||||
provider1.provider = Provider.ProviderChoices.AWS
|
||||
provider1.save()
|
||||
|
||||
provider2 = Provider.objects.create(
|
||||
provider="aws",
|
||||
uid="999888777666",
|
||||
alias="aws_tenant2",
|
||||
tenant_id=tenant2.id,
|
||||
)
|
||||
|
||||
ap_scan1, _ = self._create_executing_scan(
|
||||
tenant1, provider1, worker="dead-worker-1@host"
|
||||
)
|
||||
ap_scan2, _ = self._create_executing_scan(
|
||||
tenant2, provider2, worker="dead-worker-2@host"
|
||||
)
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 2
|
||||
assert mock_recover.call_count == 2
|
||||
|
||||
ap_scan1.refresh_from_db()
|
||||
ap_scan2.refresh_from_db()
|
||||
assert ap_scan1.state == StateChoices.FAILED
|
||||
assert ap_scan2.state == StateChoices.FAILED
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._is_worker_alive", return_value=False)
|
||||
def test_recovers_graph_data_ready_for_stale_scan(
|
||||
self,
|
||||
mock_alive,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
ap_scan, _ = self._create_executing_scan(
|
||||
tenant, provider, worker="dead-worker@host"
|
||||
)
|
||||
|
||||
cleanup_stale_attack_paths_scans()
|
||||
|
||||
mock_recover.assert_called_once()
|
||||
recovered_scan = mock_recover.call_args[0][0]
|
||||
assert recovered_scan.id == ap_scan.id
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
def test_fallback_to_time_heuristic_when_no_worker_field(
|
||||
self,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
# Old scan with no Task/TaskResult
|
||||
old_start = datetime.now(tz=timezone.utc) - timedelta(hours=49)
|
||||
ap_scan = AttackPathsScan.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=provider,
|
||||
state=StateChoices.EXECUTING,
|
||||
started_at=old_start,
|
||||
)
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 1
|
||||
|
||||
ap_scan.refresh_from_db()
|
||||
assert ap_scan.state == StateChoices.FAILED
|
||||
|
||||
@patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
|
||||
@patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
|
||||
@patch(
|
||||
"tasks.jobs.attack_paths.cleanup.rls_transaction",
|
||||
new=lambda *args, **kwargs: nullcontext(),
|
||||
)
|
||||
@patch("tasks.jobs.attack_paths.cleanup._is_worker_alive", return_value=False)
|
||||
def test_shared_worker_is_pinged_only_once(
|
||||
self,
|
||||
mock_alive,
|
||||
mock_drop_db,
|
||||
mock_recover,
|
||||
tenants_fixture,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
):
|
||||
from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
provider = providers_fixture[0]
|
||||
provider.provider = Provider.ProviderChoices.AWS
|
||||
provider.save()
|
||||
|
||||
# Two scans on the same dead worker
|
||||
self._create_executing_scan(tenant, provider, worker="shared-worker@host")
|
||||
self._create_executing_scan(tenant, provider, worker="shared-worker@host")
|
||||
|
||||
result = cleanup_stale_attack_paths_scans()
|
||||
|
||||
assert result["cleaned_up_count"] == 2
|
||||
# Worker should be pinged exactly once — cache prevents second ping
|
||||
mock_alive.assert_called_once_with("shared-worker@host")
|
||||
|
||||
@@ -1411,7 +1411,9 @@ class TestProcessFindingMicroBatch:
        assert created_finding.status == StatusChoices.PASS
        assert created_finding.delta == Finding.DeltaChoices.NEW
        assert created_finding.muted is False
        assert created_finding.check_metadata == finding.metadata
        expected_metadata = {**finding.metadata, "compliance": finding.compliance}
        assert created_finding.check_metadata == expected_metadata
        assert created_finding.check_metadata["compliance"] == finding.compliance
        assert created_finding.resource_regions == [finding.region]
        assert created_finding.resource_services == [finding.service_name]
        assert created_finding.resource_types == [finding.resource_type]

@@ -20,6 +20,7 @@ from tasks.tasks import (
    generate_outputs_task,
    perform_attack_paths_scan_task,
    perform_scheduled_scan_task,
    reaggregate_all_finding_group_summaries_task,
    refresh_lighthouse_provider_models_task,
    s3_integration_task,
    security_hub_integration_task,
@@ -2351,3 +2352,47 @@ class TestPerformScheduledScanTask:
            ).count()
            == 1
        )


@pytest.mark.django_db
class TestReaggregateAllFindingGroupSummaries:
    def setup_method(self):
        self.tenant_id = str(uuid.uuid4())

    @patch("tasks.tasks.group")
    @patch("tasks.tasks.aggregate_finding_group_summaries_task")
    @patch("tasks.tasks.Scan.objects.filter")
    def test_dispatches_subtasks_for_each_provider(
        self, mock_scan_filter, mock_agg_task, mock_group
    ):
        scan_id_1 = uuid.uuid4()
        scan_id_2 = uuid.uuid4()
        mock_group_result = MagicMock()
        # Consume the generator passed to group() and return the mocked group result
        mock_group.side_effect = lambda gen: (list(gen), mock_group_result)[1]

        mock_scan_filter.return_value.order_by.return_value.distinct.return_value.values_list.return_value = [
            scan_id_1,
            scan_id_2,
        ]

        result = reaggregate_all_finding_group_summaries_task(tenant_id=self.tenant_id)

        assert result == {"scans_reaggregated": 2}
        assert mock_agg_task.si.call_count == 2
        mock_agg_task.si.assert_any_call(
            tenant_id=self.tenant_id, scan_id=str(scan_id_1)
        )
        mock_agg_task.si.assert_any_call(
            tenant_id=self.tenant_id, scan_id=str(scan_id_2)
        )
        mock_group_result.apply_async.assert_called_once()

    @patch("tasks.tasks.group")
    @patch("tasks.tasks.Scan.objects.filter")
    def test_no_completed_scans_skips_dispatch(self, mock_scan_filter, mock_group):
        mock_scan_filter.return_value.order_by.return_value.distinct.return_value.values_list.return_value = []

        result = reaggregate_all_finding_group_summaries_task(tenant_id=self.tenant_id)

        assert result == {"scans_reaggregated": 0}
        mock_group.assert_not_called()

@@ -39,6 +39,9 @@ secrets:
  POSTGRES_PASSWORD:
  POSTGRES_DB:
  # Valkey settings
  VALKEY_SCHEME: redis
  VALKEY_USERNAME:
  VALKEY_PASSWORD:
  VALKEY_HOST: valkey-headless
  VALKEY_PORT: "6379"
  VALKEY_DB: "0"

@@ -7,6 +7,9 @@ metadata:
    {{- include "prowler.labels" . | nindent 4 }}
type: Opaque
stringData:
  VALKEY_SCHEME: {{ .Values.valkey.scheme | default "redis" | quote }}
  VALKEY_USERNAME: {{ .Values.valkey.username | default "" | quote }}
  VALKEY_PASSWORD: {{ .Values.valkey.password | default "" | quote }}
  VALKEY_HOST: "{{ include "prowler.fullname" . }}-valkey"
  VALKEY_PORT: "6379"
  VALKEY_DB: "0"

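The template above renders the Valkey connection settings from chart values, falling back to an unauthenticated `redis` scheme. A minimal sketch of a values override that these lines would pick up, assuming the chart exposes `valkey.scheme`, `valkey.username`, and `valkey.password` exactly as referenced in the template (not confirmed elsewhere in this diff):

```yaml
# Hypothetical values override for a TLS-enabled, authenticated Valkey.
# Field names are assumed from the .Values.valkey references above.
valkey:
  scheme: rediss                    # "redis" (plain) or "rediss" (TLS); defaults to "redis"
  username: prowler                 # empty by default
  password: "example-secret-value"  # placeholder, store real values securely
```
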
@@ -529,6 +529,9 @@ postgresql:
valkey:
  # If enabled, the chart will create a Secret with the keys below.
  # Otherwise, create a Secret yourself containing:
  # - VALKEY_SCHEME
  # - VALKEY_USERNAME
  # - VALKEY_PASSWORD
  # - VALKEY_HOST
  # - VALKEY_PORT
  # - VALKEY_DB

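When the chart-managed Secret is not used, the comment above expects an externally managed Secret carrying those exact keys. A minimal sketch of such a Secret, assuming the workloads read the keys verbatim from `stringData` (the name and host below are placeholders):

```yaml
# Hypothetical externally managed Secret matching the keys listed above.
apiVersion: v1
kind: Secret
metadata:
  name: prowler-valkey-connection   # placeholder name
type: Opaque
stringData:
  VALKEY_SCHEME: "redis"
  VALKEY_USERNAME: ""
  VALKEY_PASSWORD: ""
  VALKEY_HOST: "my-valkey.example.svc.cluster.local"   # placeholder host
  VALKEY_PORT: "6379"
  VALKEY_DB: "0"
```
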
@@ -79,7 +79,7 @@ Remember, our community is here to help! If you need guidance, do not hesitate t
Before proceeding, ensure the following:

- Git is installed.
- Python 3.9 or higher is installed.
- Python 3.10 or higher is installed.
- `poetry` is installed to manage dependencies.

### Forking the Prowler Repository

@@ -1249,7 +1249,7 @@ Dependencies ensure that your provider's required libraries are available when P

```toml
[tool.poetry.dependencies]
python = "^3.9"
python = ">=3.10,<3.13"
# ... other dependencies
your-sdk-library = "^1.0.0" # Add your SDK dependency
```

@@ -296,6 +296,13 @@
        "user-guide/providers/openstack/getting-started-openstack",
        "user-guide/providers/openstack/authentication"
      ]
    },
    {
      "group": "Vercel",
      "pages": [
        "user-guide/providers/vercel/getting-started-vercel",
        "user-guide/providers/vercel/authentication"
      ]
    }
  ]
},

@@ -121,8 +121,8 @@ To update the environment file:
Edit the `.env` file and change version values:

```env
PROWLER_UI_VERSION="5.21.0"
PROWLER_API_VERSION="5.21.0"
PROWLER_UI_VERSION="5.22.0"
PROWLER_API_VERSION="5.22.0"
```

<Note>

@@ -4,7 +4,7 @@ title: 'Installation'

## Installation

To install Prowler as a Python package, use `Python >= 3.9, <= 3.12`. Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/):
To install Prowler as a Python package, use `Python >= 3.10, <= 3.12`. Prowler is available as a project in [PyPI](https://pypi.org/project/prowler/):

<Tabs>
<Tab title="pipx">
@@ -12,7 +12,7 @@ To install Prowler as a Python package, use `Python >= 3.9, <= 3.12`. Prowler is

_Requirements_:

* `Python >= 3.9, <= 3.12`
* `Python >= 3.10, <= 3.12`
* `pipx` installed: [pipx installation](https://pipx.pypa.io/stable/installation/).
* AWS, GCP, Azure and/or Kubernetes credentials

@@ -30,7 +30,7 @@ To install Prowler as a Python package, use `Python >= 3.9, <= 3.12`. Prowler is

_Requirements_:

* `Python >= 3.9, <= 3.12`
* `Python >= 3.10, <= 3.12`
* `Python pip >= 21.0.0`
* AWS, GCP, Azure, M365 and/or Kubernetes credentials

@@ -87,7 +87,7 @@ To install Prowler as a Python package, use `Python >= 3.9, <= 3.12`. Prowler is
<Tab title="Amazon Linux 2">
_Requirements_:

* `Python >= 3.9, <= 3.12`
* `Python >= 3.10, <= 3.12`
* AWS, GCP, Azure and/or Kubernetes credentials

_Commands_:
@@ -102,8 +102,8 @@ To install Prowler as a Python package, use `Python >= 3.9, <= 3.12`. Prowler is
<Tab title="Ubuntu">
_Requirements_:

* `Ubuntu 23.04` or above. For older Ubuntu versions, check [pipx installation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#__tabbed_1_1) and ensure `Python >= 3.9, <= 3.12` is installed.
* `Python >= 3.9, <= 3.12`
* `Ubuntu 23.04` or above. For older Ubuntu versions, check [pipx installation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#__tabbed_1_1) and ensure `Python >= 3.10, <= 3.12` is installed.
* `Python >= 3.10, <= 3.12`
* AWS, GCP, Azure and/or Kubernetes credentials

_Commands_:

Binary file not shown. (Size before: 323 KiB, after: 318 KiB)