Compare commits
65 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 2cb8179477 | |
| | c9bbe7033b | |
| | 76ecb30968 | |
| | 84a60fe06b | |
| | f71743b95b | |
| | 68dcc5a75c | |
| | 407ae24f04 | |
| | 17c4a286af | |
| | 69ee2cdcef | |
| | 3544ff5e75 | |
| | 69287dc3a1 | |
| | cf5848d11d | |
| | 8ead3fa6bb | |
| | 21483cc12f | |
| | 628de4bd06 | |
| | 043f1ef138 | |
| | a120da9409 | |
| | d5b71c6436 | |
| | 9114d09ba5 | |
| | d2b1224a30 | |
| | 54b54e25e2 | |
| | 1b45724ca8 | |
| | ba5b23245f | |
| | 43913b1592 | |
| | 9e31160887 | |
| | 9a0c73256e | |
| | 2a160a10df | |
| | 8d8bee165b | |
| | 606efec9f8 | |
| | d5354e8b1d | |
| | a96e5890dc | |
| | bb81c5dd2d | |
| | c3acb818d9 | |
| | e6fc59267b | |
| | 62f114f5d0 | |
| | 392ffd5a60 | |
| | 507b0882d5 | |
| | 89d72cf8fd | |
| | f3a042933f | |
| | 96e7d6cb3a | |
| | a82eaa885d | |
| | 90a619a8b4 | |
| | 638bf62d76 | |
| | 962615ca1f | |
| | 5610f5ad90 | |
| | be6fe1db04 | |
| | 92b838866a | |
| | 51591cb8cd | |
| | e24e1ab771 | |
| | bc3fd79457 | |
| | 4941ed5797 | |
| | 0f4d8ff891 | |
| | d1ab8b8ae5 | |
| | 65e9593b41 | |
| | 131112398b | |
| | c952ea018e | |
| | 31b645ee53 | |
| | 0123e603d8 | |
| | b65265da4b | |
| | 1335332fe9 | |
| | f37a2a1efe | |
| | 3e0e1398c4 | |
| | a4ad9ba01f | |
| | c6d5f44c5e | |
| | 5d24a41625 | |
@@ -145,7 +145,7 @@ SENTRY_RELEASE=local
 NEXT_PUBLIC_SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}

 #### Prowler release version ####
-NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.16.0
+NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.24.1

 # Social login credentials
 SOCIAL_GOOGLE_OAUTH_CALLBACK_URL="${AUTH_URL}/api/auth/callback/google"
@@ -13,11 +13,15 @@ inputs:
   poetry-version:
     description: 'Poetry version to install'
     required: false
-    default: '2.1.1'
+    default: '2.3.4'
   install-dependencies:
     description: 'Install Python dependencies with Poetry'
     required: false
     default: 'true'
+  update-lock:
+    description: 'Run `poetry lock` during setup. Only enable when a prior step mutates pyproject.toml (e.g. API `@master` VCS rewrite). Default: false.'
+    required: false
+    default: 'false'

 runs:
   using: 'composite'
@@ -74,7 +78,7 @@ runs:
           grep -A2 -B2 "resolved_reference" poetry.lock

     - name: Update poetry.lock (prowler repo only)
-      if: github.repository == 'prowler-cloud/prowler'
+      if: github.repository == 'prowler-cloud/prowler' && inputs.update-lock == 'true'
       shell: bash
       working-directory: ${{ inputs.working-directory }}
       run: poetry lock
@@ -13,6 +13,8 @@ env:
   PROWLER_VERSION: ${{ github.event.release.tag_name }}
   BASE_BRANCH: master

+permissions: {}
+
 jobs:
   detect-release-type:
     runs-on: ubuntu-latest
@@ -17,6 +17,8 @@ concurrency:
 env:
   API_WORKING_DIR: ./api

+permissions: {}
+
 jobs:
   api-code-quality:
     runs-on: ubuntu-latest
@@ -67,6 +69,7 @@ jobs:
         with:
           python-version: ${{ matrix.python-version }}
           working-directory: ./api
+          update-lock: 'true'

       - name: Poetry check
         if: steps.check-changes.outputs.any_changed == 'true'
@@ -24,6 +24,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   api-analyze:
     name: CodeQL Security Analysis
@@ -33,6 +33,8 @@ env:
   PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
   PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-api

+permissions: {}
+
 jobs:
   setup:
     if: github.repository == 'prowler-cloud/prowler'
@@ -18,6 +18,8 @@ env:
   API_WORKING_DIR: ./api
   IMAGE_NAME: prowler-api

+permissions: {}
+
 jobs:
   api-dockerfile-lint:
     if: github.repository == 'prowler-cloud/prowler'
@@ -17,6 +17,8 @@ concurrency:
 env:
   API_WORKING_DIR: ./api

+permissions: {}
+
 jobs:
   api-security-scans:
     runs-on: ubuntu-latest
@@ -70,6 +72,7 @@ jobs:
         with:
           python-version: ${{ matrix.python-version }}
           working-directory: ./api
+          update-lock: 'true'

       - name: Bandit
         if: steps.check-changes.outputs.any_changed == 'true'
@@ -30,6 +30,8 @@ env:
   VALKEY_DB: 0
   API_WORKING_DIR: ./api

+permissions: {}
+
 jobs:
   api-tests:
     runs-on: ubuntu-latest
@@ -116,6 +118,7 @@ jobs:
         with:
           python-version: ${{ matrix.python-version }}
           working-directory: ./api
+          update-lock: 'true'

       - name: Run tests with pytest
         if: steps.check-changes.outputs.any_changed == 'true'
@@ -17,6 +17,8 @@ env:
   BACKPORT_LABEL_PREFIX: backport-to-
   BACKPORT_LABEL_IGNORE: was-backported

+permissions: {}
+
 jobs:
   backport:
     if: github.event.pull_request.merged == true && !(contains(github.event.pull_request.labels.*.name, 'backport')) && !(contains(github.event.pull_request.labels.*.name, 'was-backported'))
@@ -21,6 +21,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   zizmor:
     if: github.repository == 'prowler-cloud/prowler'
@@ -9,6 +9,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.event.issue.number }}
   cancel-in-progress: false

+permissions: {}
+
 jobs:
   update-labels:
     if: contains(github.event.issue.labels.*.name, 'status/awaiting-response')
@@ -16,6 +16,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   conventional-commit-check:
     runs-on: ubuntu-latest
@@ -13,6 +13,8 @@ env:
   BACKPORT_LABEL_PREFIX: backport-to-
   BACKPORT_LABEL_COLOR: B60205

+permissions: {}
+
 jobs:
   create-label:
     runs-on: ubuntu-latest
@@ -13,6 +13,8 @@ env:
   PROWLER_VERSION: ${{ github.event.release.tag_name }}
   BASE_BRANCH: master

+permissions: {}
+
 jobs:
   detect-release-type:
     runs-on: ubuntu-latest
@@ -14,6 +14,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   scan-secrets:
     runs-on: ubuntu-latest
@@ -21,6 +21,8 @@ concurrency:
 env:
   CHART_PATH: contrib/k8s/helm/prowler-app

+permissions: {}
+
 jobs:
   helm-lint:
     if: github.repository == 'prowler-cloud/prowler'
@@ -13,6 +13,8 @@ concurrency:
 env:
   CHART_PATH: contrib/k8s/helm/prowler-app

+permissions: {}
+
 jobs:
   release-helm-chart:
     if: github.repository == 'prowler-cloud/prowler'
@@ -9,6 +9,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.event.issue.number }}
   cancel-in-progress: false

+permissions: {}
+
 jobs:
   lock:
     if: |
@@ -15,6 +15,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   labeler:
     runs-on: ubuntu-latest
@@ -32,6 +32,8 @@ env:
   PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
   PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-mcp

+permissions: {}
+
 jobs:
   setup:
     if: github.repository == 'prowler-cloud/prowler'
@@ -18,6 +18,8 @@ env:
   MCP_WORKING_DIR: ./mcp_server
   IMAGE_NAME: prowler-mcp

+permissions: {}
+
 jobs:
   mcp-dockerfile-lint:
     if: github.repository == 'prowler-cloud/prowler'
@@ -87,6 +89,9 @@ jobs:
             api.github.com:443
             mirror.gcr.io:443
+            check.trivy.dev:443
+            get.trivy.dev:443
+            release-assets.githubusercontent.com:443
             objects.githubusercontent.com:443

       - name: Checkout repository
         uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@@ -14,6 +14,8 @@ env:
   PYTHON_VERSION: "3.12"
   WORKING_DIRECTORY: ./mcp_server

+permissions: {}
+
 jobs:
   validate-release:
     if: github.repository == 'prowler-cloud/prowler'
@@ -16,6 +16,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   check-changelog:
     if: contains(github.event.pull_request.labels.*.name, 'no-changelog') == false
@@ -16,6 +16,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   check-compliance-mapping:
     if: contains(github.event.pull_request.labels.*.name, 'no-compliance-check') == false
@@ -15,6 +15,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   check-conflicts:
     runs-on: ubuntu-latest
@@ -12,6 +12,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: false

+permissions: {}
+
 jobs:
   trigger-cloud-pull-request:
     if: |
@@ -17,6 +17,8 @@ concurrency:
 env:
   PROWLER_VERSION: ${{ inputs.prowler_version }}

+permissions: {}
+
 jobs:
   prepare-release:
     if: github.event_name == 'workflow_dispatch' && github.repository == 'prowler-cloud/prowler'
@@ -38,15 +40,11 @@ jobs:
           token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
           persist-credentials: false

-      - name: Set up Python
-        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+      - name: Setup Python with Poetry
+        uses: ./.github/actions/setup-python-poetry
         with:
           python-version: '3.12'
-
-      - name: Install Poetry
-        run: |
-          python3 -m pip install --user poetry==2.1.1
-          echo "$HOME/.local/bin" >> $GITHUB_PATH
+          install-dependencies: 'false'

       - name: Configure Git
         run: |
@@ -13,6 +13,8 @@ env:
   PROWLER_VERSION: ${{ github.event.release.tag_name }}
   BASE_BRANCH: master

+permissions: {}
+
 jobs:
   detect-release-type:
     runs-on: ubuntu-latest
@@ -10,6 +10,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   check-duplicate-test-names:
     if: github.repository == 'prowler-cloud/prowler'
@@ -14,6 +14,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   sdk-code-quality:
     if: github.repository == 'prowler-cloud/prowler'
@@ -69,22 +71,11 @@ jobs:
             contrib/**
             **/AGENTS.md

-      - name: Install Poetry
+      - name: Setup Python with Poetry
         if: steps.check-changes.outputs.any_changed == 'true'
-        run: pipx install poetry==2.1.1
-
-      - name: Set up Python ${{ matrix.python-version }}
-        if: steps.check-changes.outputs.any_changed == 'true'
-        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+        uses: ./.github/actions/setup-python-poetry
         with:
           python-version: ${{ matrix.python-version }}
-          cache: 'poetry'
-
-      - name: Install dependencies
-        if: steps.check-changes.outputs.any_changed == 'true'
-        run: |
-          poetry install --no-root
-          poetry run pip list

       - name: Check Poetry lock file
         if: steps.check-changes.outputs.any_changed == 'true'
@@ -30,6 +30,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   sdk-analyze:
     if: github.repository == 'prowler-cloud/prowler'
@@ -47,6 +47,8 @@ env:
   # AWS configuration (for ECR)
   AWS_REGION: us-east-1

+permissions: {}
+
 jobs:
   setup:
     if: github.repository == 'prowler-cloud/prowler'
@@ -74,15 +76,14 @@ jobs:
         with:
           persist-credentials: false

-      - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+      - name: Setup Python with Poetry
+        uses: ./.github/actions/setup-python-poetry
         with:
           python-version: ${{ env.PYTHON_VERSION }}
+          install-dependencies: 'false'

-      - name: Install Poetry
-        run: |
-          pipx install poetry==2.1.1
-          pipx inject poetry poetry-bumpversion
+      - name: Inject poetry-bumpversion plugin
+        run: pipx inject poetry poetry-bumpversion

       - name: Get Prowler version and set tags
         id: get-prowler-version
@@ -17,6 +17,8 @@ concurrency:
 env:
   IMAGE_NAME: prowler

+permissions: {}
+
 jobs:
   sdk-dockerfile-lint:
     if: github.repository == 'prowler-cloud/prowler'
@@ -85,6 +87,7 @@ jobs:
             check.trivy.dev:443
             debian.map.fastlydns.net:80
+            release-assets.githubusercontent.com:443
             objects.githubusercontent.com:443
             pypi.org:443
             files.pythonhosted.org:443
             www.powershellgallery.com:443
@@ -13,6 +13,8 @@ env:
   RELEASE_TAG: ${{ github.event.release.tag_name }}
   PYTHON_VERSION: '3.12'

+permissions: {}
+
 jobs:
   validate-release:
     if: github.repository == 'prowler-cloud/prowler'
@@ -73,13 +75,11 @@ jobs:
         with:
           persist-credentials: false

-      - name: Install Poetry
-        run: pipx install poetry==2.1.1
-
-      - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+      - name: Setup Python with Poetry
+        uses: ./.github/actions/setup-python-poetry
         with:
           python-version: ${{ env.PYTHON_VERSION }}
+          install-dependencies: 'false'

       - name: Build Prowler package
         run: poetry build
@@ -111,13 +111,11 @@ jobs:
         with:
           persist-credentials: false

-      - name: Install Poetry
-        run: pipx install poetry==2.1.1
-
-      - name: Set up Python ${{ env.PYTHON_VERSION }}
-        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+      - name: Setup Python with Poetry
+        uses: ./.github/actions/setup-python-poetry
         with:
           python-version: ${{ env.PYTHON_VERSION }}
+          install-dependencies: 'false'

       - name: Install toml package
         run: pip install toml
@@ -13,6 +13,8 @@ env:
   PYTHON_VERSION: '3.12'
   AWS_REGION: 'us-east-1'

+permissions: {}
+
 jobs:
   refresh-aws-regions:
     if: github.repository == 'prowler-cloud/prowler'
@@ -12,6 +12,8 @@ concurrency:
 env:
   PYTHON_VERSION: '3.12'

+permissions: {}
+
 jobs:
   refresh-oci-regions:
     if: github.repository == 'prowler-cloud/prowler'
@@ -14,6 +14,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   sdk-security-scans:
     if: github.repository == 'prowler-cloud/prowler'
@@ -69,20 +71,11 @@ jobs:
             contrib/**
             **/AGENTS.md

-      - name: Install Poetry
+      - name: Setup Python with Poetry
         if: steps.check-changes.outputs.any_changed == 'true'
-        run: pipx install poetry==2.1.1
-
-      - name: Set up Python 3.12
-        if: steps.check-changes.outputs.any_changed == 'true'
-        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+        uses: ./.github/actions/setup-python-poetry
         with:
           python-version: '3.12'
-          cache: 'poetry'
-
-      - name: Install dependencies
-        if: steps.check-changes.outputs.any_changed == 'true'
-        run: poetry install --no-root

       - name: Security scan with Bandit
         if: steps.check-changes.outputs.any_changed == 'true'
@@ -14,6 +14,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   sdk-tests:
     if: github.repository == 'prowler-cloud/prowler'
@@ -90,20 +92,11 @@ jobs:
             contrib/**
             **/AGENTS.md

-      - name: Install Poetry
+      - name: Setup Python with Poetry
         if: steps.check-changes.outputs.any_changed == 'true'
-        run: pipx install poetry==2.1.1
-
-      - name: Set up Python ${{ matrix.python-version }}
-        if: steps.check-changes.outputs.any_changed == 'true'
-        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+        uses: ./.github/actions/setup-python-poetry
         with:
           python-version: ${{ matrix.python-version }}
-          cache: 'poetry'
-
-      - name: Install dependencies
-        if: steps.check-changes.outputs.any_changed == 'true'
-        run: poetry install --no-root

       # AWS Provider
       - name: Check if AWS files changed
@@ -31,6 +31,8 @@ on:
       description: "Whether there are UI E2E tests to run"
       value: ${{ jobs.analyze.outputs.has-ui-e2e }}

+permissions: {}
+
 jobs:
   analyze:
     runs-on: ubuntu-latest
@@ -13,6 +13,8 @@ env:
   PROWLER_VERSION: ${{ github.event.release.tag_name }}
   BASE_BRANCH: master

+permissions: {}
+
 jobs:
   detect-release-type:
     runs-on: ubuntu-latest
@@ -26,6 +26,8 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

+permissions: {}
+
 jobs:
   ui-analyze:
     if: github.repository == 'prowler-cloud/prowler'
@@ -35,6 +35,8 @@ env:
   # Build args
   NEXT_PUBLIC_API_BASE_URL: http://prowler-api:8080/api/v1

+permissions: {}
+
 jobs:
   setup:
     if: github.repository == 'prowler-cloud/prowler'
@@ -18,6 +18,8 @@ env:
   UI_WORKING_DIR: ./ui
   IMAGE_NAME: prowler-ui

+permissions: {}
+
 jobs:
   ui-dockerfile-lint:
     if: github.repository == 'prowler-cloud/prowler'
@@ -89,6 +91,8 @@ jobs:
             mirror.gcr.io:443
             check.trivy.dev:443
+            get.trivy.dev:443
+            release-assets.githubusercontent.com:443
             objects.githubusercontent.com:443

       - name: Checkout repository
         uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@@ -15,6 +15,8 @@ on:
       - 'ui/**'
      - 'api/**' # API changes can affect UI E2E

+permissions: {}
+
 jobs:
   # First, analyze which tests need to run
   impact-analysis:
@@ -18,6 +18,8 @@ env:
   UI_WORKING_DIR: ./ui
   NODE_VERSION: '24.13.0'

+permissions: {}
+
 jobs:
   ui-tests:
     runs-on: ubuntu-latest
@@ -84,6 +84,7 @@ continue.json
 .continuerc.json

 # AI Coding Assistants - OpenCode
 .opencode/
+opencode.json

 # AI Coding Assistants - GitHub Copilot
@@ -70,7 +70,7 @@ repos:
         args: ["--ignore=E266,W503,E203,E501,W605"]

   - repo: https://github.com/python-poetry/poetry
-    rev: 2.1.1
+    rev: 2.3.4
     hooks:
       - id: poetry-check
         name: API - poetry-check
@@ -13,7 +13,7 @@ build:
   post_create_environment:
     # Install poetry
    # https://python-poetry.org/docs/#installing-manually
-    - python -m pip install poetry
+    - python -m pip install poetry==2.3.4
   post_install:
     # Install dependencies with 'docs' dependency group
     # https://python-poetry.org/docs/managing-dependencies/#dependency-groups
@@ -140,7 +140,7 @@ Prowler is an open-source cloud security assessment tool supporting AWS, Azure,

 | Component | Location | Tech Stack |
 |-----------|----------|------------|
-| SDK | `prowler/` | Python 3.10+, Poetry |
+| SDK | `prowler/` | Python 3.10+, Poetry 2.3+ |
 | API | `api/` | Django 5.1, DRF, Celery |
 | UI | `ui/` | Next.js 15, React 19, Tailwind 4 |
 | MCP Server | `mcp_server/` | FastMCP, Python 3.12+ |
@@ -153,12 +153,12 @@ Prowler is an open-source cloud security assessment tool supporting AWS, Azure,
 ```bash
 # Setup
 poetry install --with dev
-poetry run pre-commit install
+poetry run prek install

 # Code quality
 poetry run make lint
 poetry run make format
-poetry run pre-commit run --all-files
+poetry run prek run --all-files
 ```

 ---
@@ -68,7 +68,7 @@ ENV HOME='/home/prowler'
 ENV PATH="${HOME}/.local/bin:${PATH}"
 #hadolint ignore=DL3013
 RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir poetry
+    pip install --no-cache-dir poetry==2.3.4

 RUN poetry install --compile && \
     rm -rf ~/.cache/pip
@@ -246,14 +246,7 @@ Some pre-commit hooks require tools installed on your system:

 1. **Install [TruffleHog](https://github.com/trufflesecurity/trufflehog#install)** (secret scanning) — see the [official installation options](https://github.com/trufflesecurity/trufflehog#install).

-2. **Install [Safety](https://github.com/pyupio/safety)** (dependency vulnerability checking):
-
-    ```console
-    # Requires a Python environment (e.g. via pyenv)
-    pip install safety
-    ```
-
-3. **Install [Hadolint](https://github.com/hadolint/hadolint#install)** (Dockerfile linting) — see the [official installation options](https://github.com/hadolint/hadolint#install).
+2. **Install [Hadolint](https://github.com/hadolint/hadolint#install)** (Dockerfile linting) — see the [official installation options](https://github.com/hadolint/hadolint#install).

 ## Prowler CLI
 ### Pip package
@@ -2,6 +2,39 @@

 All notable changes to the **Prowler API** are documented in this file.

+## [1.25.1] (Prowler v5.24.1)
+
+### 🔄 Changed
+
+- Attack Paths: Restore `SYNC_BATCH_SIZE` and `FINDINGS_BATCH_SIZE` defaults to 1000, upgrade Cartography to 0.135.0, enable Celery queue priority for cleanup task, rewrite Finding insertion, remove AWS graph cleanup and add timing logs [(#10729)](https://github.com/prowler-cloud/prowler/pull/10729)
+
+### 🐞 Fixed
+
+- Finding group resources endpoints now include findings without associated resources (orphaned IaC findings) as simulated resource rows, and return one row per finding when multiple findings share a resource [(#10708)](https://github.com/prowler-cloud/prowler/pull/10708)
+- Attack Paths: Missing `tenant_id` filter while getting related findings after scan completes [(#10722)](https://github.com/prowler-cloud/prowler/pull/10722)
+- Finding group counters `pass_count`, `fail_count` and `manual_count` now exclude muted findings [(#10753)](https://github.com/prowler-cloud/prowler/pull/10753)
+- Silent data loss in `ResourceFindingMapping` bulk insert that left findings orphaned when `INSERT ... ON CONFLICT DO NOTHING` dropped rows without raising; added explicit `unique_fields` [(#10724)](https://github.com/prowler-cloud/prowler/pull/10724)
+
+---
+
+## [1.25.0] (Prowler v5.24.0)
+
+### 🔄 Changed
+
+- Bump Poetry to `2.3.4` in Dockerfile and pre-commit hooks. Regenerate `api/poetry.lock` [(#10681)](https://github.com/prowler-cloud/prowler/pull/10681)
+- Attack Paths: Remove dead `cleanup_findings` no-op and its supporting `prowler_finding_lastupdated` index [(#10684)](https://github.com/prowler-cloud/prowler/pull/10684)
+
+### 🐞 Fixed
+
+- Worker-beat race condition on cold start: replaced `sleep 15` with API service healthcheck dependency (Docker Compose) and init containers (Helm), aligned Gunicorn default port to `8080` [(#10603)](https://github.com/prowler-cloud/prowler/pull/10603)
+- API container startup crash on Linux due to root-owned bind-mount preventing JWT key generation [(#10646)](https://github.com/prowler-cloud/prowler/pull/10646)
+
+### 🔐 Security
+
+- `pytest` from 8.2.2 to 9.0.3 to fix CVE-2025-71176 [(#10678)](https://github.com/prowler-cloud/prowler/pull/10678)
+
+---
+
 ## [1.24.0] (Prowler v5.23.0)

 ### 🚀 Added
@@ -12,6 +45,7 @@ All notable changes to the **Prowler API** are documented in this file.
 - Finding groups list and latest endpoints support `sort=delta`, ordering by `new_count` then `changed_count` so groups with the most new findings rank highest [(#10606)](https://github.com/prowler-cloud/prowler/pull/10606)
 - Finding group resources endpoints (`/finding-groups/{check_id}/resources` and `/finding-groups/latest/{check_id}/resources`) now expose `finding_id` per row, pointing to the most recent matching Finding for each resource. UUIDv7 ordering guarantees `Max(finding__id)` resolves to the latest snapshot [(#10630)](https://github.com/prowler-cloud/prowler/pull/10630)
 - Handle CIS and CISA SCuBA compliance framework from google workspace [(#10629)](https://github.com/prowler-cloud/prowler/pull/10629)
+- Sort support for all finding group counter fields: `pass_muted_count`, `fail_muted_count`, `manual_muted_count`, and all `new_*`/`changed_*` status-mute breakdown counters [(#10655)](https://github.com/prowler-cloud/prowler/pull/10655)

 ### 🔄 Changed
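The #10724 fix reads more clearly next to Django's bulk-insert API. A minimal sketch of the pattern the changelog describes, assuming hypothetical field names for the unique constraint (the model's actual columns are not shown in this diff):

```python
# Hedged sketch of the conflict handling described in #10724.
# ignore_conflicts=True maps to "INSERT ... ON CONFLICT DO NOTHING":
# conflicting rows are dropped silently and nothing is raised.
from api.models import ResourceFindingMapping  # import path as in the tests below

def insert_mappings(mappings):
    # Before (silent data loss on any unique-constraint conflict):
    # ResourceFindingMapping.objects.bulk_create(mappings, ignore_conflicts=True)

    # After (sketch): name the conflict target explicitly so conflicting
    # rows are updated instead of vanishing. Field names are assumptions.
    ResourceFindingMapping.objects.bulk_create(
        mappings,
        update_conflicts=True,
        unique_fields=["tenant_id", "resource_id", "finding_id"],  # assumed constraint
        update_fields=["inserted_at"],  # assumed non-key column to refresh
    )
```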
@@ -71,7 +71,7 @@ RUN mkdir -p /tmp/prowler_api_output
 COPY pyproject.toml ./

 RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir poetry
+    pip install --no-cache-dir poetry==2.3.4

 ENV PATH="/home/prowler/.local/bin:$PATH"

@@ -56,7 +56,6 @@ start_worker() {

 start_worker_beat() {
   echo "Starting the worker-beat..."
-  sleep 15
   poetry run python -m celery -A config.celery beat -l "${DJANGO_LOGGING_LEVEL:-info}" --scheduler django_celery_beat.schedulers:DatabaseScheduler
 }

@@ -25,7 +25,7 @@ dependencies = [
     "defusedxml==0.7.1",
     "gunicorn==23.0.0",
     "lxml==5.3.2",
-    "prowler @ git+https://github.com/prowler-cloud/prowler.git@v5.23",
+    "prowler @ git+https://github.com/prowler-cloud/prowler.git@v5.24",
     "psycopg2-binary==2.9.9",
     "pytest-celery[redis] (==1.3.0)",
     "sentry-sdk[django] (==2.56.0)",
@@ -38,7 +38,7 @@ dependencies = [
     "matplotlib (==3.10.8)",
     "reportlab (==4.4.10)",
     "neo4j (==6.1.0)",
-    "cartography (==0.132.0)",
+    "cartography (==0.135.0)",
     "gevent (==25.9.1)",
     "werkzeug (==3.1.7)",
     "sqlparse (==0.5.5)",
@@ -50,7 +50,7 @@ name = "prowler-api"
 package-mode = false
 # Needed for the SDK compatibility
 requires-python = ">=3.11,<3.13"
-version = "1.24.0"
+version = "1.25.1"

 [project.scripts]
 celery = "src.backend.config.settings.celery"
@@ -62,10 +62,9 @@ django-silk = "5.3.2"
 docker = "7.1.0"
 filelock = "3.20.3"
 freezegun = "1.5.1"
-marshmallow = "==3.26.2"
 mypy = "1.10.1"
 pylint = "3.2.5"
-pytest = "8.2.2"
+pytest = "9.0.3"
 pytest-cov = "5.0.0"
 pytest-django = "4.8.0"
 pytest-env = "1.1.3"
@@ -75,3 +74,4 @@ ruff = "0.5.0"
 safety = "3.7.0"
 tqdm = "4.67.1"
 vulture = "2.14"
+prek = "0.3.9"
@@ -0,0 +1,23 @@
+from django.db import migrations
+
+TASK_NAME = "attack-paths-cleanup-stale-scans"
+
+
+def set_cleanup_priority(apps, schema_editor):
+    PeriodicTask = apps.get_model("django_celery_beat", "PeriodicTask")
+    PeriodicTask.objects.filter(name=TASK_NAME).update(priority=0)
+
+
+def unset_cleanup_priority(apps, schema_editor):
+    PeriodicTask = apps.get_model("django_celery_beat", "PeriodicTask")
+    PeriodicTask.objects.filter(name=TASK_NAME).update(priority=None)
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("api", "0089_backfill_finding_group_status_muted"),
+    ]
+
+    operations = [
+        migrations.RunPython(set_cleanup_priority, unset_cleanup_priority),
+    ]
@@ -1,7 +1,7 @@
 openapi: 3.0.3
 info:
   title: Prowler API
-  version: 1.24.0
+  version: 1.25.1
   description: |-
     Prowler API specification.

@@ -57,6 +57,7 @@ from api.models import (
     ProviderGroupMembership,
     ProviderSecret,
     Resource,
+    ResourceFindingMapping,
     Role,
     RoleProviderGroupRelationship,
     SAMLConfiguration,
@@ -15465,7 +15466,7 @@ class TestFindingGroupViewSet:
         attrs = data[0]["attributes"]
         assert attrs["status"] == "FAIL"
         assert attrs["muted"] is True
-        assert attrs["fail_count"] == 2
+        assert attrs["fail_count"] == 0
         assert attrs["fail_muted_count"] == 2
         assert attrs["pass_muted_count"] == 0
         assert attrs["manual_muted_count"] == 0
@@ -16030,6 +16031,36 @@ class TestFindingGroupViewSet:
         # s3_bucket_public_access has 2 findings with 2 different resources
         assert len(data) == 2

+    def test_resources_id_matches_resource_id_for_mapped_findings(
+        self, authenticated_client, finding_groups_fixture
+    ):
+        """Findings with a resource expose the resource id as row id (hot path contract)."""
+        response = authenticated_client.get(
+            reverse(
+                "finding-group-resources", kwargs={"pk": "s3_bucket_public_access"}
+            ),
+            {"filter[inserted_at]": TODAY},
+        )
+
+        assert response.status_code == status.HTTP_200_OK
+        data = response.json()["data"]
+        assert data, "expected resources in response"
+
+        resource_ids = set(
+            ResourceFindingMapping.objects.filter(
+                finding__check_id="s3_bucket_public_access",
+            ).values_list("resource_id", flat=True)
+        )
+        finding_ids = set(
+            Finding.objects.filter(
+                check_id="s3_bucket_public_access",
+            ).values_list("id", flat=True)
+        )
+
+        returned_ids = {item["id"] for item in data}
+        assert returned_ids <= {str(rid) for rid in resource_ids}
+        assert returned_ids.isdisjoint({str(fid) for fid in finding_ids})
+
     def test_resources_fields(self, authenticated_client, finding_groups_fixture):
         """Test resource fields (uid, name, service, region, type) have valid values."""
         response = authenticated_client.get(
@@ -17046,6 +17077,57 @@ class TestFindingGroupViewSet:
         # Descending boolean: True (1) before False (0)
         assert muted_values == sorted(muted_values, reverse=True)

+    @pytest.mark.parametrize(
+        "endpoint_name", ["finding-group-list", "finding-group-latest"]
+    )
+    @pytest.mark.parametrize(
+        "sort_field",
+        [
+            "pass_muted_count",
+            "fail_muted_count",
+            "manual_muted_count",
+            "new_fail_count",
+            "new_fail_muted_count",
+            "new_pass_count",
+            "new_pass_muted_count",
+            "new_manual_count",
+            "new_manual_muted_count",
+            "changed_fail_count",
+            "changed_fail_muted_count",
+            "changed_pass_count",
+            "changed_pass_muted_count",
+            "changed_manual_count",
+            "changed_manual_muted_count",
+        ],
+    )
+    def test_finding_groups_sort_by_counter_fields(
+        self,
+        authenticated_client,
+        finding_groups_fixture,
+        endpoint_name,
+        sort_field,
+    ):
+        """All counter fields are accepted as sort parameters (asc and desc)."""
+        params = {"sort": f"-{sort_field}"}
+        if endpoint_name == "finding-group-list":
+            params["filter[inserted_at]"] = TODAY
+
+        response = authenticated_client.get(reverse(endpoint_name), params)
+        assert response.status_code == status.HTTP_200_OK
+        data = response.json()["data"]
+        assert len(data) > 0
+
+        desc_values = [item["attributes"][sort_field] for item in data]
+        assert desc_values == sorted(desc_values, reverse=True)
+
+        params["sort"] = sort_field
+        response = authenticated_client.get(reverse(endpoint_name), params)
+        assert response.status_code == status.HTTP_200_OK
+        asc_values = [
+            item["attributes"][sort_field] for item in response.json()["data"]
+        ]
+        assert asc_values == sorted(asc_values)
+
     @pytest.mark.parametrize(
         "endpoint_name", ["finding-group-list", "finding-group-latest"]
     )
@@ -4225,10 +4225,11 @@ class FindingGroupResourceSerializer(BaseSerializerV1):
     Serializer for Finding Group Resources - resources within a finding group.

     Returns individual resources with their current status, severity,
-    and timing information.
+    and timing information. Orphan findings (without any resource) expose the
+    finding id as `id` so the row stays identifiable in the UI.
     """

-    id = serializers.UUIDField(source="resource_id")
+    id = serializers.UUIDField(source="row_id")
     resource = serializers.SerializerMethodField()
     provider = serializers.SerializerMethodField()
     finding_id = serializers.UUIDField()
@@ -35,11 +35,13 @@ from django.db.models import (
     CharField,
     Count,
     DecimalField,
+    Exists,
     ExpressionWrapper,
     F,
     IntegerField,
     Max,
     Min,
+    OuterRef,
     Prefetch,
     Q,
     QuerySet,
@@ -415,7 +417,7 @@ class SchemaView(SpectacularAPIView):

     def get(self, request, *args, **kwargs):
         spectacular_settings.TITLE = "Prowler API"
-        spectacular_settings.VERSION = "1.24.0"
+        spectacular_settings.VERSION = "1.25.1"
         spectacular_settings.DESCRIPTION = (
             "Prowler API specification.\n\nThis file is auto-generated."
         )
@@ -7125,17 +7127,16 @@ class FindingGroupViewSet(BaseRLSViewSet):
             output_field=IntegerField(),
         )

-        # `pass_count`, `fail_count` and `manual_count` count *every* finding
-        # for the check (muted or not) so the aggregated `status` reflects the
-        # underlying check outcome regardless of mute state. Whether the group
-        # is actionable is signalled by the orthogonal `muted` flag below.
+        # `pass_count`, `fail_count` and `manual_count` only count non-muted
+        # findings. Muted findings are tracked separately via the
+        # `*_muted_count` fields.
         return (
             queryset.values("check_id")
             .annotate(
                 severity_order=Max(severity_case),
-                pass_count=Count("id", filter=Q(status="PASS")),
-                fail_count=Count("id", filter=Q(status="FAIL")),
-                manual_count=Count("id", filter=Q(status="MANUAL")),
+                pass_count=Count("id", filter=Q(status="PASS", muted=False)),
+                fail_count=Count("id", filter=Q(status="FAIL", muted=False)),
+                manual_count=Count("id", filter=Q(status="MANUAL", muted=False)),
                 pass_muted_count=Count("id", filter=Q(status="PASS", muted=True)),
                 fail_muted_count=Count("id", filter=Q(status="FAIL", muted=True)),
                 manual_muted_count=Count("id", filter=Q(status="MANUAL", muted=True)),
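A worked example of the new counter semantics in plain Python, rather than the `Count(..., filter=Q(...))` annotations above (illustration only; it matches the `fail_count == 0` / `fail_muted_count == 2` test change earlier in this diff):

```python
# Three findings for one check: two muted FAILs and one live PASS.
findings = [
    {"status": "FAIL", "muted": True},
    {"status": "FAIL", "muted": True},
    {"status": "PASS", "muted": False},
]

def count(status, muted):
    return sum(1 for f in findings if f["status"] == status and f["muted"] is muted)

# Base counters now exclude muted findings; muted ones are tracked apart.
assert count("FAIL", muted=False) == 0   # fail_count
assert count("FAIL", muted=True) == 2    # fail_muted_count
assert count("PASS", muted=False) == 1   # pass_count

# The aggregated status still falls back to muted counts, so a group whose
# failures are all muted keeps reporting FAIL (see the next hunk).
total_fail = count("FAIL", False) + count("FAIL", True)
assert ("FAIL" if total_fail > 0 else "PASS") == "FAIL"
```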
@@ -7280,12 +7281,14 @@ class FindingGroupViewSet(BaseRLSViewSet):
             # finding-level aggregation path.
             row.pop("nonmuted_count", None)

-            # Compute aggregated status. Counts are inclusive of muted findings,
-            # so the underlying check outcome surfaces even when the group is
-            # fully muted.
-            if row.get("fail_count", 0) > 0:
+            # Compute aggregated status from non-muted counts first, then
+            # fall back to muted counts so fully-muted groups still reflect
+            # the underlying check outcome.
+            total_fail = row.get("fail_count", 0) + row.get("fail_muted_count", 0)
+            total_pass = row.get("pass_count", 0) + row.get("pass_muted_count", 0)
+            if total_fail > 0:
                 row["status"] = "FAIL"
-            elif row.get("pass_count", 0) > 0:
+            elif total_pass > 0:
                 row["status"] = "PASS"
             else:
                 row["status"] = "MANUAL"
@@ -7311,8 +7314,23 @@ class FindingGroupViewSet(BaseRLSViewSet):
         "pass_count": "pass_count",
         "manual_count": "manual_count",
         "muted_count": "muted_count",
+        "pass_muted_count": "pass_muted_count",
+        "fail_muted_count": "fail_muted_count",
+        "manual_muted_count": "manual_muted_count",
         "new_count": "new_count",
+        "new_fail_count": "new_fail_count",
+        "new_fail_muted_count": "new_fail_muted_count",
+        "new_pass_count": "new_pass_count",
+        "new_pass_muted_count": "new_pass_muted_count",
+        "new_manual_count": "new_manual_count",
+        "new_manual_muted_count": "new_manual_muted_count",
         "changed_count": "changed_count",
+        "changed_fail_count": "changed_fail_count",
+        "changed_fail_muted_count": "changed_fail_muted_count",
+        "changed_pass_count": "changed_pass_count",
+        "changed_pass_muted_count": "changed_pass_muted_count",
+        "changed_manual_count": "changed_manual_count",
+        "changed_manual_muted_count": "changed_manual_muted_count",
         "resources_total": "resources_total",
         "resources_fail": "resources_fail",
         "first_seen_at": "agg_first_seen_at",
@@ -7370,9 +7388,12 @@ class FindingGroupViewSet(BaseRLSViewSet):

         if computed_params.get("status") or computed_params.getlist("status__in"):
             queryset = queryset.annotate(
+                total_fail=F("fail_count") + F("fail_muted_count"),
+                total_pass=F("pass_count") + F("pass_muted_count"),
+            ).annotate(
                 aggregated_status=Case(
-                    When(fail_count__gt=0, then=Value("FAIL")),
-                    When(pass_count__gt=0, then=Value("PASS")),
+                    When(total_fail__gt=0, then=Value("FAIL")),
+                    When(total_pass__gt=0, then=Value("PASS")),
                     default=Value("MANUAL"),
                     output_field=CharField(),
                 )
@@ -7563,6 +7584,53 @@ class FindingGroupViewSet(BaseRLSViewSet):
             .order_by(*ordering)
         )

+    def _orphan_findings_queryset(self, filtered_queryset, finding_ids=None):
+        """Findings in the filtered set with no ResourceFindingMapping entries."""
+        orphan_qs = filtered_queryset.filter(
+            ~Exists(ResourceFindingMapping.objects.filter(finding_id=OuterRef("pk")))
+        )
+        if finding_ids is not None:
+            orphan_qs = orphan_qs.filter(id__in=finding_ids)
+        return orphan_qs
+
+    def _has_orphan_findings(self, filtered_queryset) -> bool:
+        """Return True if any finding in the filtered set has no resource mapping."""
+        return self._orphan_findings_queryset(filtered_queryset).exists()
+
+    def _orphan_aggregation_values(self, orphan_queryset):
+        """Raw rows for orphan findings; resource payload synthesized from metadata.
+
+        check_metadata is stored with lowercase keys (see
+        `prowler.lib.outputs.finding.Finding.get_metadata`) and
+        `Finding.resource_groups` is already denormalized at ingest time.
+        """
+        return orphan_queryset.annotate(
+            _provider_type=F("scan__provider__provider"),
+            _provider_uid=F("scan__provider__uid"),
+            _provider_alias=F("scan__provider__alias"),
+            _svc=KeyTextTransform("servicename", "check_metadata"),
+            _region=KeyTextTransform("region", "check_metadata"),
+            _rtype=KeyTextTransform("resourcetype", "check_metadata"),
+            _rgroup=F("resource_groups"),
+        ).values(
+            "id",
+            "uid",
+            "status",
+            "severity",
+            "delta",
+            "muted",
+            "muted_reason",
+            "first_seen_at",
+            "inserted_at",
+            "_provider_type",
+            "_provider_uid",
+            "_provider_alias",
+            "_svc",
+            "_region",
+            "_rtype",
+            "_rgroup",
+        )
+
     def _post_process_resources(self, resource_data):
         """Convert resource aggregation rows to API output."""
         results = []
@@ -7584,9 +7652,13 @@ class FindingGroupViewSet(BaseRLSViewSet):
             else:
                 delta = None

+            resource_id = row["resource_id"]
+            finding_id = str(row["finding_id"]) if row.get("finding_id") else None
+
             results.append(
                 {
-                    "resource_id": row["resource_id"],
+                    "row_id": resource_id,
+                    "resource_id": resource_id,
                     "resource_uid": row["resource_uid"],
                     "resource_name": row["resource_name"],
                     "resource_service": row["resource_service"],
@@ -7605,9 +7677,46 @@ class FindingGroupViewSet(BaseRLSViewSet):
                     "muted": bool(row.get("muted", False)),
                     "muted_reason": row.get("muted_reason"),
                     "resource_group": row.get("resource_group", ""),
-                    "finding_id": (
-                        str(row["finding_id"]) if row.get("finding_id") else None
-                    ),
+                    "finding_id": finding_id,
                 }
             )

         return results

+    def _post_process_orphans(self, orphan_rows):
+        """Convert orphan finding rows into the same API shape as mapping rows."""
+        results = []
+        for row in orphan_rows:
+            status_val = row["status"]
+            status = status_val if status_val in ("FAIL", "PASS") else "MANUAL"
+
+            muted = bool(row["muted"])
+            delta_val = row.get("delta")
+            delta = delta_val if delta_val in ("new", "changed") and not muted else None
+
+            finding_id = str(row["id"])
+
+            results.append(
+                {
+                    "row_id": finding_id,
+                    "resource_id": None,
+                    "resource_uid": row["uid"],
+                    "resource_name": row["uid"],
+                    "resource_service": row["_svc"] or "",
+                    "resource_region": row["_region"] or "",
+                    "resource_type": row["_rtype"] or "",
+                    "provider_type": row["_provider_type"],
+                    "provider_uid": row["_provider_uid"],
+                    "provider_alias": row["_provider_alias"],
+                    "status": status,
+                    "severity": row["severity"],
+                    "delta": delta,
+                    "first_seen_at": row["first_seen_at"],
+                    "last_seen_at": row["inserted_at"],
+                    "muted": muted,
+                    "muted_reason": row.get("muted_reason"),
+                    "resource_group": row["_rgroup"] or "",
+                    "finding_id": finding_id,
+                }
+            )

@@ -7668,26 +7777,19 @@ class FindingGroupViewSet(BaseRLSViewSet):
                 sort_param, self._FINDING_GROUP_SORT_MAP
             )
             if ordering:
-                # status_order is annotated on demand so groups can be sorted by
-                # their aggregated status (FAIL > PASS > MANUAL), mirroring the
-                # priority used in _post_process_aggregation. Counts are
-                # inclusive of muted findings, so the underlying check outcome
-                # surfaces even for fully muted groups.
                 if any(field.lstrip("-") == "status_order" for field in ordering):
                     aggregated_queryset = aggregated_queryset.annotate(
+                        total_fail_for_sort=F("fail_count") + F("fail_muted_count"),
+                        total_pass_for_sort=F("pass_count") + F("pass_muted_count"),
+                    ).annotate(
                         status_order=Case(
-                            When(fail_count__gt=0, then=Value(3)),
-                            When(pass_count__gt=0, then=Value(2)),
+                            When(total_fail_for_sort__gt=0, then=Value(3)),
+                            When(total_pass_for_sort__gt=0, then=Value(2)),
                             default=Value(1),
                             output_field=IntegerField(),
                         )
                     )

                 # delta_order is a virtual sort field: expand it to a
                 # lexicographic ordering by (new_count, changed_count) so groups
                 # with more new findings rank higher, with changed_count as the
                 # tie-breaker (preserves the "new > changed" priority used by
                 # the resources endpoint, but driven by the actual counters).
                 expanded_ordering = []
                 for field in ordering:
                     if field.lstrip("-") == "delta_order":
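The `delta_order` expansion that the comment describes can be sketched standalone; the body of the loop continues past the end of this hunk, so the behaviour below is inferred from the comment and the context lines (illustration only):

```python
def expand_ordering(ordering):
    """Expand the virtual delta_order key into (new_count, changed_count)."""
    expanded = []
    for field in ordering:
        if field.lstrip("-") == "delta_order":
            prefix = "-" if field.startswith("-") else ""
            expanded.append(f"{prefix}new_count")
            expanded.append(f"{prefix}changed_count")
        else:
            expanded.append(field)
    return expanded

# Descending delta sort becomes a lexicographic sort on the two counters:
assert expand_ordering(["-delta_order", "check_id"]) == [
    "-new_count",
    "-changed_count",
    "check_id",
]
```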
@@ -7721,41 +7823,64 @@ class FindingGroupViewSet(BaseRLSViewSet):
     def _paginated_resource_response(
         self, request, filtered_queryset, resource_ids, tenant_id
     ):
-        """Paginate and return resources.
+        """Paginate and return resources, appending orphan findings when present.

-        Without sort: paginate lightweight resource IDs first, aggregate only the page.
-        With sort: build a lightweight ordering subquery (resource_id + sort keys),
-        paginate that, then aggregate full details only for the page.
+        Hot path (no orphans, or resource filter applied): resources come from
+        ResourceFindingMapping aggregation. Untouched pre-existing behaviour.
+
+        Orphan fallback: findings without a mapping (e.g. IaC) are appended
+        after mapping rows as synthesised resource-like rows so they remain
+        visible in the UI without paying the aggregation cost on the hot path.
         """
         sort_param = request.query_params.get("sort")

         ordering = None
         if sort_param:
-            ordering = self._validate_sort_fields(sort_param, self._RESOURCE_SORT_MAP)
-            if ordering:
-                if "resource_id" not in {field.lstrip("-") for field in ordering}:
-                    ordering.append("resource_id")
+            validated = self._validate_sort_fields(sort_param, self._RESOURCE_SORT_MAP)
+            ordering = validated if validated else None

-            # Phase 1: lightweight aggregation with only sort keys, paginate
-            ordering_qs = self._build_resource_ordering_queryset(
-                filtered_queryset,
-                resource_ids=resource_ids,
-                tenant_id=tenant_id,
-                ordering=ordering,
-            )
-            page = self.paginate_queryset(ordering_qs)
-            if page is not None:
-                page_ids = [row["resource_id"] for row in page]
-                resource_data = self._build_resource_aggregation(
-                    filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
-                )
-                # Re-sort to match the page ordering
-                id_order = {rid: idx for idx, rid in enumerate(page_ids)}
-                results = self._post_process_resources(resource_data)
-                results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
-                serializer = FindingGroupResourceSerializer(results, many=True)
-                return self.get_paginated_response(serializer.data)
+        # Resource filters can only match findings with resources; skip orphan
+        # detection entirely when they are present.
+        if resource_ids is not None:
+            return self._mapping_paginated_response(
+                request, filtered_queryset, resource_ids, tenant_id, ordering
+            )

-            page_ids = [row["resource_id"] for row in ordering_qs]
+        has_mappings = self._build_resource_mapping_queryset(
+            filtered_queryset, resource_ids=None, tenant_id=tenant_id
+        ).exists()
+
+        if has_mappings:
+            # Normal or mixed group: serve only resource-mapped rows.
+            # TODO: Orphan findings in mixed groups are intentionally excluded
+            # until the ephemeral resources strategy is decided. When resolved,
+            # route mixed groups to _combined_paginated_response instead.
+            return self._mapping_paginated_response(
+                request, filtered_queryset, resource_ids, tenant_id, ordering
+            )
+
+        # Pure orphan group (e.g. IaC): synthesize resource-like rows.
+        return self._combined_paginated_response(
+            request, filtered_queryset, tenant_id, ordering
+        )
+
+    def _mapping_paginated_response(
+        self, request, filtered_queryset, resource_ids, tenant_id, ordering
+    ):
+        """Mapping-only paginated response (original fast path)."""
+        if ordering:
+            if "resource_id" not in {field.lstrip("-") for field in ordering}:
+                ordering.append("resource_id")
+
+            # Phase 1: lightweight aggregation with only sort keys, paginate
+            ordering_qs = self._build_resource_ordering_queryset(
+                filtered_queryset,
+                resource_ids=resource_ids,
+                tenant_id=tenant_id,
+                ordering=ordering,
+            )
+            page = self.paginate_queryset(ordering_qs)
+            if page is not None:
+                page_ids = [row["resource_id"] for row in page]
+                resource_data = self._build_resource_aggregation(
+                    filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
+                )
@@ -7763,10 +7888,18 @@ class FindingGroupViewSet(BaseRLSViewSet):
                 results = self._post_process_resources(resource_data)
                 results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
                 serializer = FindingGroupResourceSerializer(results, many=True)
-                return Response(serializer.data)
+                return self.get_paginated_response(serializer.data)

             page_ids = [row["resource_id"] for row in ordering_qs]
             resource_data = self._build_resource_aggregation(
                 filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
             )
             id_order = {rid: idx for idx, rid in enumerate(page_ids)}
             results = self._post_process_resources(resource_data)
             results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
             serializer = FindingGroupResourceSerializer(results, many=True)
             return Response(serializer.data)

+        # No sort (or only empty sort fragments): paginate lightweight resource IDs
+        # first, aggregate only the page.
         mapping_qs = self._build_resource_mapping_queryset(
             filtered_queryset, resource_ids=resource_ids, tenant_id=tenant_id
         )
@@ -7794,6 +7927,95 @@ class FindingGroupViewSet(BaseRLSViewSet):
             serializer = FindingGroupResourceSerializer(results, many=True)
             return Response(serializer.data)

+    def _combined_paginated_response(
+        self, request, filtered_queryset, tenant_id, ordering
+    ):
+        """Mapping rows + orphan findings appended at end.
+
+        Orphans sit after mapping rows regardless of sort. This keeps the
+        mapping-only code path intact for checks that have no orphans (the
+        common case) and avoids paying UNION/coalesce costs there.
+        """
+        mapping_qs = self._build_resource_mapping_queryset(
+            filtered_queryset, resource_ids=None, tenant_id=tenant_id
+        )
+        mapping_count = mapping_qs.values("resource_id").distinct().count()
+
+        orphan_ids = list(
+            self._orphan_findings_queryset(filtered_queryset)
+            .order_by("id")
+            .values_list("id", flat=True)
+        )
+        orphan_count = len(orphan_ids)
+        total = mapping_count + orphan_count
+
+        # Paginate a simple [0..total) index sequence so DRF produces proper
+        # links/meta; then slice mapping / orphan sources accordingly.
+        page = self.paginate_queryset(range(total))
+        page_indices = list(page) if page is not None else list(range(total))
+
+        mapping_indices = [i for i in page_indices if i < mapping_count]
+        orphan_positions = [
+            i - mapping_count for i in page_indices if i >= mapping_count
+        ]
+
+        mapping_results = []
+        if mapping_indices:
+            start = mapping_indices[0]
+            stop = mapping_indices[-1] + 1
+            if ordering:
+                ordering_fields = list(ordering)
+                if "resource_id" not in {
+                    field.lstrip("-") for field in ordering_fields
+                }:
+                    ordering_fields.append("resource_id")
+                ordered_qs = self._build_resource_ordering_queryset(
+                    filtered_queryset,
+                    resource_ids=None,
+                    tenant_id=tenant_id,
+                    ordering=ordering_fields,
+                )
+                slice_rids = [row["resource_id"] for row in ordered_qs[start:stop]]
+            else:
+                slice_rids = list(
+                    mapping_qs.values_list("resource_id", flat=True)
+                    .distinct()
+                    .order_by("resource_id")[start:stop]
+                )
+            if slice_rids:
+                resource_data = self._build_resource_aggregation(
+                    filtered_queryset,
+                    resource_ids=slice_rids,
+                    tenant_id=tenant_id,
+                )
+                rows_by_rid = {row["resource_id"]: row for row in resource_data}
+                ordered_rows = [
+                    rows_by_rid[rid] for rid in slice_rids if rid in rows_by_rid
+                ]
+                mapping_results = self._post_process_resources(ordered_rows)
+
+        orphan_results = []
+        if orphan_positions:
+            slice_fids = [orphan_ids[pos] for pos in orphan_positions]
+            raw_rows = list(
+                self._orphan_aggregation_values(
+                    self._orphan_findings_queryset(
+                        filtered_queryset, finding_ids=slice_fids
+                    )
+                )
+            )
+            rows_by_fid = {row["id"]: row for row in raw_rows}
+            ordered_rows = [
+                rows_by_fid[fid] for fid in slice_fids if fid in rows_by_fid
+            ]
+            orphan_results = self._post_process_orphans(ordered_rows)
+
+        results = mapping_results + orphan_results
+        serializer = FindingGroupResourceSerializer(results, many=True)
+        if page is not None:
+            return self.get_paginated_response(serializer.data)
+        return Response(serializer.data)
+
     def list(self, request, *args, **kwargs):
         """
         List finding groups with aggregation and filtering.
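The subtle part of `_combined_paginated_response` is the index-space pagination: DRF paginates a virtual `range(total)`, and the page indices are then translated into slices of the two ordered sources (mapping rows first, orphans appended). A self-contained sketch of that split:

```python
def split_page(page_indices, mapping_count):
    """Split virtual page indices into mapping rows and orphan positions."""
    mapping_indices = [i for i in page_indices if i < mapping_count]
    orphan_positions = [i - mapping_count for i in page_indices if i >= mapping_count]
    return mapping_indices, orphan_positions

# A page straddling the boundary: 7 mapping rows exist and the page covers
# indices 5..9, so rows 5-6 come from mappings and the rest are the first
# three orphan findings.
assert split_page(list(range(5, 10)), mapping_count=7) == ([5, 6], [0, 1, 2])
```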
@@ -17,8 +17,10 @@ celery_app.config_from_object("django.conf:settings", namespace="CELERY")
 celery_app.conf.update(result_extended=True, result_expires=None)

 celery_app.conf.broker_transport_options = {
-    "visibility_timeout": BROKER_VISIBILITY_TIMEOUT
+    "visibility_timeout": BROKER_VISIBILITY_TIMEOUT,
+    "queue_order_strategy": "priority",
 }
+celery_app.conf.task_default_priority = 6
 celery_app.conf.result_backend_transport_options = {
     "visibility_timeout": BROKER_VISIBILITY_TIMEOUT
 }
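How these two settings combine with the `PeriodicTask` migration earlier in this diff, as a hedged standalone sketch (broker URL and task body are placeholders): with `queue_order_strategy: priority` on the Redis/Valkey transport, Celery drains higher-priority queues first, so the cleanup beat task pinned to priority 0 is picked up ahead of work enqueued at the default priority 6.

```python
from celery import Celery

app = Celery("sketch", broker="redis://localhost:6379/0")  # placeholder broker URL
app.conf.broker_transport_options = {"queue_order_strategy": "priority"}
app.conf.task_default_priority = 6  # everything else lands here by default

@app.task
def cleanup_stale_scans():
    """Placeholder body standing in for the real cleanup task."""

# Equivalent to what the migration pins in the database for the beat entry:
cleanup_stale_scans.apply_async(priority=0)  # lower number, consumed first on Redis
```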
@@ -15,7 +15,7 @@ from config.django.production import LOGGING as DJANGO_LOGGERS, DEBUG  # noqa: E
 from config.custom_logging import BackendLogger  # noqa: E402

 BIND_ADDRESS = env("DJANGO_BIND_ADDRESS", default="127.0.0.1")
-PORT = env("DJANGO_PORT", default=8000)
+PORT = env("DJANGO_PORT", default=8080)

 # Server settings
 bind = f"{BIND_ADDRESS}:{PORT}"
@@ -1,6 +1,8 @@
 # Portions of this file are based on code from the Cartography project
 # (https://github.com/cartography-cncf/cartography), which is licensed under the Apache 2.0 License.

+import time
+
 from typing import Any

 import aioboto3
@@ -33,7 +35,7 @@ def start_aws_ingestion(

     For the scan progress updates:
     - The caller of this function (`tasks.jobs.attack_paths.scan.run`) has set it to 2.
-    - When the control returns to the caller, it will be set to 95.
+    - When the control returns to the caller, it will be set to 93.
     """

     # Initialize variables common to all jobs
@@ -89,34 +91,50 @@ def start_aws_ingestion(
         logger.info(
             f"Syncing function permission_relationships for AWS account {prowler_api_provider.uid}"
         )
+        t0 = time.perf_counter()
         cartography_aws.RESOURCE_FUNCTIONS["permission_relationships"](**sync_args)
+        logger.info(
+            f"Synced function permission_relationships for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
+        )
         db_utils.update_attack_paths_scan_progress(attack_paths_scan, 88)

     if "resourcegroupstaggingapi" in requested_syncs:
         logger.info(
             f"Syncing function resourcegroupstaggingapi for AWS account {prowler_api_provider.uid}"
         )
+        t0 = time.perf_counter()
         cartography_aws.RESOURCE_FUNCTIONS["resourcegroupstaggingapi"](**sync_args)
+        logger.info(
+            f"Synced function resourcegroupstaggingapi for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
+        )
         db_utils.update_attack_paths_scan_progress(attack_paths_scan, 89)

     logger.info(
         f"Syncing ec2_iaminstanceprofile scoped analysis for AWS account {prowler_api_provider.uid}"
     )
+    t0 = time.perf_counter()
     cartography_aws.run_scoped_analysis_job(
         "aws_ec2_iaminstanceprofile.json",
         neo4j_session,
         common_job_parameters,
     )
+    logger.info(
+        f"Synced ec2_iaminstanceprofile scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
+    )
     db_utils.update_attack_paths_scan_progress(attack_paths_scan, 90)

     logger.info(
         f"Syncing lambda_ecr analysis for AWS account {prowler_api_provider.uid}"
     )
+    t0 = time.perf_counter()
     cartography_aws.run_analysis_job(
         "aws_lambda_ecr.json",
         neo4j_session,
         common_job_parameters,
     )
+    logger.info(
+        f"Synced lambda_ecr analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
+    )

     if all(
         s in requested_syncs
@@ -125,25 +143,34 @@ def start_aws_ingestion(
|
||||
logger.info(
|
||||
f"Syncing lb_container_exposure scoped analysis for AWS account {prowler_api_provider.uid}"
|
||||
)
|
||||
t0 = time.perf_counter()
|
||||
cartography_aws.run_scoped_analysis_job(
|
||||
"aws_lb_container_exposure.json",
|
||||
neo4j_session,
|
||||
common_job_parameters,
|
||||
)
|
||||
logger.info(
|
||||
f"Synced lb_container_exposure scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
|
||||
)
|
||||
|
||||
if all(s in requested_syncs for s in ["ec2:network_acls", "ec2:load_balancer_v2"]):
|
||||
logger.info(
|
||||
f"Syncing lb_nacl_direct scoped analysis for AWS account {prowler_api_provider.uid}"
|
||||
)
|
||||
t0 = time.perf_counter()
|
||||
cartography_aws.run_scoped_analysis_job(
|
||||
"aws_lb_nacl_direct.json",
|
||||
neo4j_session,
|
||||
common_job_parameters,
|
||||
)
|
||||
logger.info(
|
||||
f"Synced lb_nacl_direct scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
|
||||
)
|
||||
|
||||
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 91)
|
||||
|
||||
logger.info(f"Syncing metadata for AWS account {prowler_api_provider.uid}")
|
||||
t0 = time.perf_counter()
|
||||
cartography_aws.merge_module_sync_metadata(
|
||||
neo4j_session,
|
||||
group_type="AWSAccount",
|
||||
@@ -152,24 +179,23 @@ def start_aws_ingestion(
|
||||
update_tag=cartography_config.update_tag,
|
||||
stat_handler=cartography_aws.stat_handler,
|
||||
)
|
||||
logger.info(
|
||||
f"Synced metadata for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
|
||||
)
|
||||
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 92)
|
||||
|
||||
# Removing the added extra field
|
||||
del common_job_parameters["AWS_ID"]
|
||||
|
||||
logger.info(f"Syncing cleanup_job for AWS account {prowler_api_provider.uid}")
|
||||
cartography_aws.run_cleanup_job(
|
||||
"aws_post_ingestion_principals_cleanup.json",
|
||||
neo4j_session,
|
||||
common_job_parameters,
|
||||
)
|
||||
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 93)
|
||||
|
||||
logger.info(f"Syncing analysis for AWS account {prowler_api_provider.uid}")
|
||||
t0 = time.perf_counter()
|
||||
cartography_aws._perform_aws_analysis(
|
||||
requested_syncs, neo4j_session, common_job_parameters
|
||||
)
|
||||
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 94)
|
||||
logger.info(
|
||||
f"Synced analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
|
||||
)
|
||||
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 93)
|
||||
|
||||
return failed_syncs
|
||||
|
||||
@@ -234,6 +260,8 @@ def sync_aws_account(
|
||||
)
|
||||
|
||||
try:
|
||||
func_t0 = time.perf_counter()
|
||||
|
||||
# `ecr:image_layers` uses `aioboto3_session` instead of `boto3_session`
|
||||
if func_name == "ecr:image_layers":
|
||||
cartography_aws.RESOURCE_FUNCTIONS[func_name](
|
||||
@@ -257,7 +285,15 @@ def sync_aws_account(
|
||||
else:
|
||||
cartography_aws.RESOURCE_FUNCTIONS[func_name](**sync_args)
|
||||
|
||||
logger.info(
|
||||
f"Synced function {func_name} for AWS account {prowler_api_provider.uid} in {time.perf_counter() - func_t0:.3f}s"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
f"Synced function {func_name} for AWS account {prowler_api_provider.uid} in {time.perf_counter() - func_t0:.3f}s (FAILED)"
|
||||
)
|
||||
|
||||
exception_message = utils.stringify_exception(
|
||||
e, f"Exception for AWS sync function: {func_name}"
|
||||
)
|
||||
|
||||
@@ -8,9 +8,9 @@ from tasks.jobs.attack_paths import aws
|
||||
# Batch size for Neo4j write operations (resource labeling, cleanup)
|
||||
BATCH_SIZE = env.int("ATTACK_PATHS_BATCH_SIZE", 1000)
|
||||
# Batch size for Postgres findings fetch (keyset pagination page size)
|
||||
FINDINGS_BATCH_SIZE = env.int("ATTACK_PATHS_FINDINGS_BATCH_SIZE", 500)
|
||||
FINDINGS_BATCH_SIZE = env.int("ATTACK_PATHS_FINDINGS_BATCH_SIZE", 1000)
|
||||
# Batch size for temp-to-tenant graph sync (nodes and relationships per cursor page)
|
||||
SYNC_BATCH_SIZE = env.int("ATTACK_PATHS_SYNC_BATCH_SIZE", 250)
|
||||
SYNC_BATCH_SIZE = env.int("ATTACK_PATHS_SYNC_BATCH_SIZE", 1000)
|
||||
|
||||
# Neo4j internal labels (Prowler-specific, not provider-specific)
|
||||
# - `Internet`: Singleton node representing external internet access for exposed-resource queries

@@ -5,7 +5,6 @@ This module handles:
- Adding resource labels to Cartography nodes for efficient lookups
- Loading Prowler findings into the graph
- Linking findings to resources
- Cleaning up stale findings
"""

from collections import defaultdict
@@ -13,6 +12,7 @@ from typing import Any, Generator
from uuid import UUID

import neo4j

from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger
from tasks.jobs.attack_paths.config import (
@@ -24,7 +24,6 @@ from tasks.jobs.attack_paths.config import (
)
from tasks.jobs.attack_paths.queries import (
    ADD_RESOURCE_LABEL_TEMPLATE,
    CLEANUP_FINDINGS_TEMPLATE,
    INSERT_FINDING_TEMPLATE,
    render_cypher_template,
)
@@ -88,18 +87,21 @@ def analysis(
    prowler_api_provider: Provider,
    scan_id: str,
    config: CartographyConfig,
) -> None:
) -> tuple[int, int]:
    """
    Main entry point for Prowler findings analysis.

    Adds resource labels, loads findings, and cleans up stale data.
    Adds resource labels and loads findings.
    Returns (labeled_nodes, findings_loaded).
    """
    add_resource_label(
    total_labeled = add_resource_label(
        neo4j_session, prowler_api_provider.provider, str(prowler_api_provider.uid)
    )
    findings_data = stream_findings_with_resources(prowler_api_provider, scan_id)
    load_findings(neo4j_session, findings_data, prowler_api_provider, config)
    cleanup_findings(neo4j_session, prowler_api_provider, config)
    total_loaded = load_findings(
        neo4j_session, findings_data, prowler_api_provider, config
    )
    return total_labeled, total_loaded


def add_resource_label(
@@ -149,12 +151,11 @@ def load_findings(
    findings_batches: Generator[list[dict[str, Any]], None, None],
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
) -> int:
    """Load Prowler findings into the graph, linking them to resources."""
    query = render_cypher_template(
        INSERT_FINDING_TEMPLATE,
        {
            "__ROOT_NODE_LABEL__": get_root_node_label(prowler_api_provider.provider),
            "__NODE_UID_FIELD__": get_node_uid_field(prowler_api_provider.provider),
            "__RESOURCE_LABEL__": get_provider_resource_label(
                prowler_api_provider.provider
@@ -163,7 +164,6 @@ def load_findings(
    )

    parameters = {
        "provider_uid": str(prowler_api_provider.uid),
        "last_updated": config.update_tag,
        "prowler_version": ProwlerConfig.prowler_version,
    }
@@ -181,28 +181,7 @@ def load_findings(
        neo4j_session.run(query, parameters)

    logger.info(f"Finished loading {total_records} records in {batch_num} batches")


def cleanup_findings(
    neo4j_session: neo4j.Session,
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
    """Remove stale findings (classic Cartography behaviour)."""
    parameters = {
        "last_updated": config.update_tag,
        "batch_size": BATCH_SIZE,
    }

    batch = 1
    deleted_count = 1
    while deleted_count > 0:
        logger.info(f"Cleaning findings batch {batch}")

        result = neo4j_session.run(CLEANUP_FINDINGS_TEMPLATE, parameters)

        deleted_count = result.single().get("deleted_findings_count", 0)
        batch += 1
    return total_records


# Findings Streaming (Generator-based)
@@ -273,7 +252,9 @@ def _fetch_findings_batch(
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        # Use `all_objects` to get `Findings` even on soft-deleted `Providers`,
        # though the provider has already been validated as active in this context
        qs = FindingModel.all_objects.filter(scan_id=scan_id).order_by("id")
        qs = FindingModel.all_objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        ).order_by("id")

        if after_id is not None:
            qs = qs.filter(id__gt=after_id)
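
The `id__gt` filter is the heart of keyset (cursor) pagination: each page resumes strictly after the last id already seen, so the database never has to skip rows with `OFFSET`. A minimal sketch of the surrounding fetch loop, reusing the `FindingModel.all_objects` manager shown above; the helper name is illustrative:

```python
def iter_finding_batches(tenant_id: str, scan_id: str, batch_size: int = 1000):
    """Yield findings in id order, one keyset page at a time (sketch)."""
    after_id = None
    while True:
        qs = FindingModel.all_objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        ).order_by("id")
        if after_id is not None:
            qs = qs.filter(id__gt=after_id)  # resume strictly after the cursor
        batch = list(qs[:batch_size])
        if not batch:
            return
        yield batch
        after_id = batch[-1].id  # advance the cursor to the last row seen
```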

@@ -13,14 +13,13 @@ from tasks.jobs.attack_paths.config import (
logger = get_task_logger(__name__)


# Indexes for Prowler findings and resource lookups
# Indexes for Prowler Findings and resource lookups
FINDINGS_INDEX_STATEMENTS = [
    # Resource indexes for Prowler Finding lookups
    "CREATE INDEX aws_resource_arn IF NOT EXISTS FOR (n:_AWSResource) ON (n.arn);",
    "CREATE INDEX aws_resource_id IF NOT EXISTS FOR (n:_AWSResource) ON (n.id);",
    # Prowler Finding indexes
    f"CREATE INDEX prowler_finding_id IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.id);",
    f"CREATE INDEX prowler_finding_lastupdated IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.lastupdated);",
    f"CREATE INDEX prowler_finding_status IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.status);",
    # Internet node index for MERGE lookups
    f"CREATE INDEX internet_id IF NOT EXISTS FOR (n:{INTERNET_NODE_LABEL}) ON (n.id);",

@@ -32,17 +32,14 @@ ADD_RESOURCE_LABEL_TEMPLATE = """
"""

INSERT_FINDING_TEMPLATE = f"""
MATCH (account:__ROOT_NODE_LABEL__ {{id: $provider_uid}})
UNWIND $findings_data AS finding_data

OPTIONAL MATCH (account)-->(resource_by_uid:__RESOURCE_LABEL__)
WHERE resource_by_uid.__NODE_UID_FIELD__ = finding_data.resource_uid
WITH account, finding_data, resource_by_uid
OPTIONAL MATCH (resource_by_uid:__RESOURCE_LABEL__ {{__NODE_UID_FIELD__: finding_data.resource_uid}})
WITH finding_data, resource_by_uid

OPTIONAL MATCH (account)-->(resource_by_id:__RESOURCE_LABEL__)
OPTIONAL MATCH (resource_by_id:__RESOURCE_LABEL__ {{id: finding_data.resource_uid}})
WHERE resource_by_uid IS NULL
  AND resource_by_id.id = finding_data.resource_uid
WITH account, finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
WITH finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
WHERE resource IS NOT NULL

MERGE (finding:{PROWLER_FINDING_LABEL} {{id: finding_data.id}})
@@ -80,17 +77,6 @@ INSERT_FINDING_TEMPLATE = f"""
    rel.lastupdated = $last_updated
"""

CLEANUP_FINDINGS_TEMPLATE = f"""
MATCH (finding:{PROWLER_FINDING_LABEL})
WHERE finding.lastupdated < $last_updated

WITH finding LIMIT $batch_size

DETACH DELETE finding

RETURN COUNT(finding) AS deleted_findings_count
"""

# Internet queries (used by internet.py)
# ---------------------------------------


@@ -55,6 +55,7 @@ exception propagates to Celery.

import logging
import time

from typing import Any

from cartography.config import Config as CartographyConfig
@@ -144,6 +145,12 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        attack_paths_scan, task_id, tenant_cartography_config
    )

    scan_t0 = time.perf_counter()
    logger.info(
        f"Starting Attack Paths scan ({attack_paths_scan.id}) for "
        f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
    )

    subgraph_dropped = False
    sync_completed = False
    provider_gated = False
@@ -169,6 +176,7 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 2)

        # The real scan, which iterates over cloud services
        t0 = time.perf_counter()
        ingestion_exceptions = utils.call_within_event_loop(
            cartography_ingestion_function,
            tmp_neo4j_session,
@@ -177,19 +185,23 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
            prowler_sdk_provider,
            attack_paths_scan,
        )
        logger.info(
            f"Cartography ingestion completed in {time.perf_counter() - t0:.3f}s "
            f"(failed_syncs={len(ingestion_exceptions)})"
        )

        # Post-processing: Just keeping it to be more Cartography compliant
        logger.info(
            f"Syncing Cartography ontology for AWS account {prowler_api_provider.uid}"
        )
        cartography_ontology.run(tmp_neo4j_session, tmp_cartography_config)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 95)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 94)

        logger.info(
            f"Syncing Cartography analysis for AWS account {prowler_api_provider.uid}"
        )
        cartography_analysis.run(tmp_neo4j_session, tmp_cartography_config)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 95)

        # Creating Internet node and CAN_ACCESS relationships
        logger.info(
@@ -198,14 +210,20 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        internet.analysis(
            tmp_neo4j_session, prowler_api_provider, tmp_cartography_config
        )
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)

        # Adding Prowler Finding nodes and relationships
        logger.info(
            f"Syncing Prowler analysis for AWS account {prowler_api_provider.uid}"
        )
        findings.analysis(
        t0 = time.perf_counter()
        labeled_nodes, findings_loaded = findings.analysis(
            tmp_neo4j_session, prowler_api_provider, scan_id, tmp_cartography_config
        )
        logger.info(
            f"Prowler analysis completed in {time.perf_counter() - t0:.3f}s "
            f"(findings={findings_loaded}, labeled_nodes={labeled_nodes})"
        )
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 97)

        logger.info(
@@ -227,22 +245,33 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        logger.info(f"Deleting existing provider graph in {tenant_database_name}")
        db_utils.set_provider_graph_data_ready(attack_paths_scan, False)
        provider_gated = True
        graph_database.drop_subgraph(

        t0 = time.perf_counter()
        deleted_nodes = graph_database.drop_subgraph(
            database=tenant_database_name,
            provider_id=str(prowler_api_provider.id),
        )
        logger.info(
            f"Deleted existing provider graph in {time.perf_counter() - t0:.3f}s "
            f"(deleted_nodes={deleted_nodes})"
        )
        subgraph_dropped = True
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 98)

        logger.info(
            f"Syncing graph from {tmp_database_name} into {tenant_database_name}"
        )
        sync.sync_graph(
        t0 = time.perf_counter()
        sync_result = sync.sync_graph(
            source_database=tmp_database_name,
            target_database=tenant_database_name,
            tenant_id=str(prowler_api_provider.tenant_id),
            provider_id=str(prowler_api_provider.id),
        )
        logger.info(
            f"Synced graph in {time.perf_counter() - t0:.3f}s "
            f"(nodes={sync_result['nodes']}, relationships={sync_result['relationships']})"
        )
        sync_completed = True
        db_utils.set_graph_data_ready(attack_paths_scan, True)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 99)
@@ -250,17 +279,16 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        logger.info(f"Clearing Neo4j cache for database {tenant_database_name}")
        graph_database.clear_cache(tenant_database_name)

        logger.info(
            f"Completed Cartography ({attack_paths_scan.id}) for "
            f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
        )

        logger.info(f"Dropping temporary Neo4j database {tmp_database_name}")
        graph_database.drop_database(tmp_database_name)

        db_utils.finish_attack_paths_scan(
            attack_paths_scan, StateChoices.COMPLETED, ingestion_exceptions
        )
        logger.info(
            f"Attack Paths scan completed in {time.perf_counter() - scan_t0:.3f}s "
            f"(state=completed, failed_syncs={len(ingestion_exceptions)})"
        )
        return ingestion_exceptions

    except Exception as e:

@@ -5,6 +5,8 @@ This module handles syncing graph data from temporary scan databases
to the tenant database, adding provider isolation labels and properties.
"""

import time

from collections import defaultdict
from typing import Any

@@ -81,6 +83,7 @@ def sync_nodes(
    Source and target sessions are opened sequentially per batch to avoid
    holding two Bolt connections simultaneously for the entire sync duration.
    """
    t0 = time.perf_counter()
    last_id = -1
    total_synced = 0

@@ -117,7 +120,7 @@ def sync_nodes(

    total_synced += batch_count
    logger.info(
        f"Synced {total_synced} nodes from {source_database} to {target_database}"
        f"Synced {total_synced} nodes from {source_database} to {target_database} in {time.perf_counter() - t0:.3f}s"
    )

    return total_synced
@@ -136,6 +139,7 @@ def sync_relationships(
    Source and target sessions are opened sequentially per batch to avoid
    holding two Bolt connections simultaneously for the entire sync duration.
    """
    t0 = time.perf_counter()
    last_id = -1
    total_synced = 0

@@ -166,7 +170,7 @@ def sync_relationships(

    total_synced += batch_count
    logger.info(
        f"Synced {total_synced} relationships from {source_database} to {target_database}"
        f"Synced {total_synced} relationships from {source_database} to {target_database} in {time.perf_counter() - t0:.3f}s"
    )

    return total_synced
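
These hunks repeat the same `t0 = time.perf_counter()` / `logger.info(f"... {time.perf_counter() - t0:.3f}s")` pair around each phase. A hypothetical helper that would factor the pattern out; the diff deliberately inlines it instead, which keeps every log message explicit and grep-friendly:

```python
import time
from contextlib import contextmanager


@contextmanager
def timed(logger, message: str):
    """Log `message` with elapsed wall-clock seconds when the block exits."""
    t0 = time.perf_counter()
    try:
        yield
    finally:
        logger.info(f"{message} in {time.perf_counter() - t0:.3f}s")
```

Usage would look like `with timed(logger, "Synced graph"): sync.sync_graph(...)`; note the helper logs even when the block raises, whereas the inline form above logs a distinct `(FAILED)` message from the `except` branch.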

@@ -752,11 +752,19 @@ def _process_finding_micro_batch(
    )

    if mappings_to_create:
        ResourceFindingMapping.objects.bulk_create(
        created_mappings = ResourceFindingMapping.objects.bulk_create(
            mappings_to_create,
            batch_size=SCAN_DB_BATCH_SIZE,
            ignore_conflicts=True,
            unique_fields=["tenant_id", "resource_id", "finding_id"],
        )
        inserted = sum(1 for m in created_mappings if m.pk)
        if inserted != len(mappings_to_create):
            logger.error(
                f"scan {scan_instance.id}: expected "
                f"{len(mappings_to_create)} ResourceFindingMapping rows, "
                f"inserted {inserted}. Rolling back micro-batch."
            )

    # Update finding denormalized arrays
    findings_to_update = []
@@ -1804,11 +1812,9 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
    )

    # Aggregate findings by check_id for this scan.
    # `pass_count`, `fail_count` and `manual_count` count *every* finding
    # in this group, regardless of mute state, so the aggregated `status`
    # always reflects the underlying check outcome (FAIL > PASS > MANUAL)
    # even when the group is fully muted. The orthogonal `muted` flag is
    # what tells whether the group has any actionable (non-muted) findings.
    # `pass_count`, `fail_count` and `manual_count` only count non-muted
    # findings. Muted findings are tracked separately via the
    # `*_muted_count` fields.
    aggregated = (
        Finding.objects.filter(
            tenant_id=tenant_id,
@@ -1817,9 +1823,9 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
        .values("check_id")
        .annotate(
            severity_order=Max(severity_case),
            pass_count=Count("id", filter=Q(status="PASS")),
            fail_count=Count("id", filter=Q(status="FAIL")),
            manual_count=Count("id", filter=Q(status="MANUAL")),
            pass_count=Count("id", filter=Q(status="PASS", muted=False)),
            fail_count=Count("id", filter=Q(status="FAIL", muted=False)),
            manual_count=Count("id", filter=Q(status="MANUAL", muted=False)),
            pass_muted_count=Count("id", filter=Q(status="PASS", muted=True)),
            fail_muted_count=Count("id", filter=Q(status="FAIL", muted=True)),
            manual_muted_count=Count("id", filter=Q(status="MANUAL", muted=True)),
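
With muted findings split into their own counters, the FAIL > PASS > MANUAL precedence from the replaced comment can be applied to the non-muted counts alone. Purely an illustration; `group_status` is not part of this change:

```python
def group_status(row: dict) -> str:
    """Derive a group's status from the aggregated non-muted counters (sketch)."""
    if row["fail_count"]:
        return "FAIL"
    if row["pass_count"]:
        return "PASS"
    return "MANUAL"
```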

@@ -38,11 +38,14 @@ class TestAttackPathsRun:
    @patch("tasks.jobs.attack_paths.scan.db_utils.finish_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
    @patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
    @patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
    @patch(
        "tasks.jobs.attack_paths.scan.sync.sync_graph",
        return_value={"nodes": 0, "relationships": 0},
    )
    @patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph", return_value=0)
    @patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
    @patch("tasks.jobs.attack_paths.scan.internet.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
    @patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -188,7 +191,7 @@ class TestAttackPathsRun:
    @patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
    @patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
    @patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
    @patch("tasks.jobs.attack_paths.scan.internet.analysis")
    @patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -287,7 +290,7 @@ class TestAttackPathsRun:
    @patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
    @patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
    @patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
    @patch("tasks.jobs.attack_paths.scan.internet.analysis")
    @patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -390,7 +393,7 @@ class TestAttackPathsRun:
    @patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
    @patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
    @patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
    @patch("tasks.jobs.attack_paths.scan.internet.analysis")
    @patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -489,14 +492,17 @@ class TestAttackPathsRun:
    @patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
    @patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
    @patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
    @patch(
        "tasks.jobs.attack_paths.scan.sync.sync_graph",
        return_value={"nodes": 0, "relationships": 0},
    )
    @patch(
        "tasks.jobs.attack_paths.scan.graph_database.drop_subgraph",
        side_effect=RuntimeError("drop failed"),
    )
    @patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
    @patch("tasks.jobs.attack_paths.scan.internet.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
    @patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -609,7 +615,7 @@ class TestAttackPathsRun:
    @patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
    @patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
    @patch("tasks.jobs.attack_paths.scan.internet.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
    @patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -718,11 +724,14 @@ class TestAttackPathsRun:
    @patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
    @patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
    @patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
    @patch(
        "tasks.jobs.attack_paths.scan.sync.sync_graph",
        return_value={"nodes": 0, "relationships": 0},
    )
    @patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
    @patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
    @patch("tasks.jobs.attack_paths.scan.internet.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
    @patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -833,14 +842,17 @@ class TestAttackPathsRun:
    @patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
    @patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
    @patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
    @patch(
        "tasks.jobs.attack_paths.scan.sync.sync_graph",
        return_value={"nodes": 0, "relationships": 0},
    )
    @patch(
        "tasks.jobs.attack_paths.scan.graph_database.drop_subgraph",
        side_effect=RuntimeError("drop failed"),
    )
    @patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
    @patch("tasks.jobs.attack_paths.scan.internet.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
    @patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -1274,10 +1286,6 @@ class TestAttackPathsFindingsHelpers:
        mock_session = MagicMock()

        with (
            patch(
                "tasks.jobs.attack_paths.findings.get_root_node_label",
                return_value="AWSAccount",
            ),
            patch(
                "tasks.jobs.attack_paths.findings.get_node_uid_field",
                return_value="arn",
@@ -1294,27 +1302,9 @@ class TestAttackPathsFindingsHelpers:
        assert mock_session.run.call_count == 2
        for call_args in mock_session.run.call_args_list:
            params = call_args.args[1]
            assert params["provider_uid"] == str(provider.uid)
            assert params["last_updated"] == config.update_tag
            assert "findings_data" in params

    def test_cleanup_findings_runs_batches(self, providers_fixture):
        provider = providers_fixture[0]
        config = SimpleNamespace(update_tag=1024)
        mock_session = MagicMock()

        first_batch = MagicMock()
        first_batch.single.return_value = {"deleted_findings_count": 3}
        second_batch = MagicMock()
        second_batch.single.return_value = {"deleted_findings_count": 0}
        mock_session.run.side_effect = [first_batch, second_batch]

        findings_module.cleanup_findings(mock_session, provider, config)

        assert mock_session.run.call_count == 2
        params = mock_session.run.call_args.args[1]
        assert params["last_updated"] == config.update_tag

    def test_stream_findings_with_resources_returns_latest_scan_data(
        self,
        tenants_fixture,
@@ -1690,10 +1680,6 @@ class TestAttackPathsFindingsHelpers:
        yield  # Make it a generator

        with (
            patch(
                "tasks.jobs.attack_paths.findings.get_root_node_label",
                return_value="AWSAccount",
            ),
            patch(
                "tasks.jobs.attack_paths.findings.get_node_uid_field",
                return_value="arn",

@@ -34,6 +34,10 @@ spec:
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.worker.initContainers }}
      initContainers:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: worker
          {{- with .Values.worker.securityContext }}

@@ -32,6 +32,10 @@ spec:
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.worker_beat.initContainers }}
      initContainers:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: worker-beat
          {{- with .Values.worker_beat.securityContext }}

@@ -220,7 +220,7 @@ def _send_prowler_results(prowler_results, _prowler_version, options):
    try:
        _debug("RESULT MSG --- {0}".format(_check_result), 2)
        _check_result = json.loads(TEMPLATE_CHECK.format(_check_result))
    except:
    except Exception:
        _debug(
            "INVALID JSON --- {0}".format(TEMPLATE_CHECK.format(_check_result)), 1
        )
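
The switch away from a bare `except:` matters because a bare clause also traps `BaseException` subclasses such as `SystemExit` and `KeyboardInterrupt`. A standalone illustration of the narrower form (not code from this file):

```python
import json


def parse_or_none(payload: str):
    try:
        return json.loads(payload)
    except Exception:  # catches json.JSONDecodeError and other errors,
        return None    # but lets SystemExit / KeyboardInterrupt propagate
```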

@@ -1,4 +1,11 @@
services:
  api-dev-init:
    image: busybox:1.37.0
    volumes:
      - ./_data/api:/data
    command: ["sh", "-c", "chown -R 1000:1000 /data"]
    restart: "no"

  api-dev:
    hostname: "prowler-api"
    image: prowler-api-dev
@@ -21,12 +28,20 @@ services:
      - ./_data/api:/home/prowler/.config/prowler-api
      - outputs:/tmp/prowler_api_output
    depends_on:
      api-dev-init:
        condition: service_completed_successfully
      postgres:
        condition: service_healthy
      valkey:
        condition: service_healthy
      neo4j:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "wget -q -O /dev/null http://127.0.0.1:${DJANGO_PORT:-8080}/api/v1/ || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 12
      start_period: 60s
    entrypoint:
      - "/home/prowler/docker-entrypoint.sh"
      - "dev"
@@ -139,11 +154,7 @@ services:
      - ./api/docker-entrypoint.sh:/home/prowler/docker-entrypoint.sh
      - outputs:/tmp/prowler_api_output
    depends_on:
      valkey:
        condition: service_healthy
      postgres:
        condition: service_healthy
      neo4j:
      api-dev:
        condition: service_healthy
    ulimits:
      nofile:
@@ -165,11 +176,7 @@ services:
      - path: ./.env
        required: false
    depends_on:
      valkey:
        condition: service_healthy
      postgres:
        condition: service_healthy
      neo4j:
      api-dev:
        condition: service_healthy
    ulimits:
      nofile:

@@ -5,6 +5,13 @@
# docker compose -f docker-compose-dev.yml up
#
services:
  api-init:
    image: busybox:1.37.0
    volumes:
      - ./_data/api:/data
    command: ["sh", "-c", "chown -R 1000:1000 /data"]
    restart: "no"

  api:
    hostname: "prowler-api"
    image: prowlercloud/prowler-api:${PROWLER_API_VERSION:-stable}
@@ -17,12 +24,20 @@ services:
      - ./_data/api:/home/prowler/.config/prowler-api
      - output:/tmp/prowler_api_output
    depends_on:
      api-init:
        condition: service_completed_successfully
      postgres:
        condition: service_healthy
      valkey:
        condition: service_healthy
      neo4j:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "wget -q -O /dev/null http://127.0.0.1:${DJANGO_PORT:-8080}/api/v1/ || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 12
      start_period: 60s
    entrypoint:
      - "/home/prowler/docker-entrypoint.sh"
      - "prod"
@@ -114,9 +129,7 @@ services:
    volumes:
      - "output:/tmp/prowler_api_output"
    depends_on:
      valkey:
        condition: service_healthy
      postgres:
      api:
        condition: service_healthy
    ulimits:
      nofile:
@@ -132,9 +145,7 @@ services:
      - path: ./.env
        required: false
    depends_on:
      valkey:
        condition: service_healthy
      postgres:
      api:
        condition: service_healthy
    ulimits:
      nofile:
@@ -118,18 +118,22 @@ In case you have any doubts, consult the [Poetry environment activation guide](h

### Pre-Commit Hooks

This repository uses Git pre-commit hooks managed by the [pre-commit](https://pre-commit.com/) tool, which is installed with `poetry install --with dev`. Next, run the following command in the root of this repository:
This repository uses Git pre-commit hooks managed by the [prek](https://prek.j178.dev/) tool, which is installed with `poetry install --with dev`. Next, run the following command in the root of this repository:

```shell
pre-commit install
prek install
```

Successful installation should produce the following output:

```shell
pre-commit installed at .git/hooks/pre-commit
prek installed at `.git/hooks/pre-commit`
```

<Warning>
If pre-commit hooks were previously installed, run `prek install --overwrite` to replace the existing hook. Otherwise, both tools will run on each commit.
</Warning>

### Code Quality and Security Checks

Before merging pull requests, several automated checks and utilities ensure code security and updated dependencies:

@@ -15,8 +15,7 @@ This document describes the internal architecture of Prowler Lighthouse AI, enab

Lighthouse AI operates as a LangChain-based agent that connects Large Language Models (LLMs) with Prowler security data through the Model Context Protocol (MCP).

<img className="block dark:hidden" src="/images/lighthouse-architecture-light.png" alt="Prowler Lighthouse Architecture" />
<img className="hidden dark:block" src="/images/lighthouse-architecture-dark.png" alt="Prowler Lighthouse Architecture" />


### Three-Tier Architecture

@@ -12,6 +12,24 @@
    "dark": "/images/prowler-logo-white.png",
    "light": "/images/prowler-logo-black.png"
  },
  "contextual": {
    "options": [
      "copy",
      "view",
      {
        "title": "Request a feature",
        "description": "Open a feature request on GitHub",
        "icon": "plus",
        "href": "https://github.com/prowler-cloud/prowler/issues/new?template=feature-request.yml"
      },
      {
        "title": "Report an issue",
        "description": "Open a bug report on GitHub",
        "icon": "bug",
        "href": "https://github.com/prowler-cloud/prowler/issues/new?template=bug_report.yml"
      }
    ]
  },
  "navigation": {
    "tabs": [
      {
@@ -133,6 +151,7 @@
          ]
        },
        "user-guide/tutorials/prowler-app-attack-paths",
        "user-guide/tutorials/prowler-app-finding-groups",
        "user-guide/tutorials/prowler-cloud-public-ips",
        {
          "group": "Tutorials",

@@ -121,8 +121,8 @@ To update the environment file:
Edit the `.env` file and change version values:

```env
PROWLER_UI_VERSION="5.22.0"
PROWLER_API_VERSION="5.22.0"
PROWLER_UI_VERSION="5.24.0"
PROWLER_API_VERSION="5.24.0"
```

<Note>

@@ -59,6 +59,10 @@ Prowler Lighthouse AI is powerful, but there are limitations:
- **NextJS session dependence**: If your Prowler application session expires or you log out, Lighthouse AI will error out. Refresh and log back in to continue.
- **Response quality**: The response quality depends on the selected LLM provider and model. Choose models with strong tool-calling capabilities for best results. We recommend OpenAI's `gpt-5` model.

## Architecture



## Extending Lighthouse AI

Lighthouse AI retrieves data through Prowler MCP. To add new capabilities, extend the Prowler MCP Server with additional tools, and Lighthouse AI will discover them automatically.

@@ -46,8 +46,7 @@ Search and retrieve official Prowler documentation:

The following diagram illustrates the Prowler MCP Server architecture and its integration points:

<img className="block dark:hidden" src="/images/prowler_mcp_schema_light.png" alt="Prowler MCP Server Schema" />
<img className="hidden dark:block" src="/images/prowler_mcp_schema_dark.png" alt="Prowler MCP Server Schema" />


The architecture shows how AI assistants connect through the MCP protocol to access Prowler's three main components:
- Prowler Cloud/App for security operations

(Binary image changes: 4 files added (755 KiB, 146 KiB, 340 KiB, 410 KiB); 2 files removed (267 KiB, 265 KiB))
@@ -0,0 +1,37 @@
flowchart TB
    browser([Browser])

    subgraph NEXTJS["Next.js Server"]
        route["API Route<br/>(auth + context assembly)"]
        agent["LangChain Agent"]

        subgraph TOOLS["Agent Tools"]
            metatools["Meta-tools<br/>describe_tool / execute_tool / load_skill"]
        end

        mcpclient["MCP Client<br/>(HTTP transport)"]
    end

    llm["LLM Provider<br/>(OpenAI / Bedrock / OpenAI-compatible)"]

    subgraph MCP["Prowler MCP Server"]
        app_tools["prowler_app_* tools<br/>(auth required)"]
        hub_tools["prowler_hub_* tools<br/>(no auth)"]
        docs_tools["prowler_docs_* tools<br/>(no auth)"]
    end

    api["Prowler API"]
    hub["hub.prowler.com"]
    docs["docs.prowler.com<br/>(Mintlify)"]

    browser <-->|SSE stream| route
    route --> agent
    agent <-->|LLM API| llm
    agent --> metatools
    metatools --> mcpclient
    mcpclient -->|MCP HTTP · Bearer token<br/>for prowler_app_* only| app_tools
    mcpclient -->|MCP HTTP| hub_tools
    mcpclient -->|MCP HTTP| docs_tools
    app_tools -->|REST| api
    hub_tools -->|REST| hub
    docs_tools -->|REST| docs

(Binary image change: 1 file added (286 KiB))
@@ -23,6 +23,8 @@ flowchart TB
    user --> ui
    user --> cli
    ui -->|REST| api
    ui -->|MCP HTTP| mcp
    mcp -->|REST| api
    api --> pg
    api --> valkey
    beat -->|enqueue jobs| valkey
@@ -31,7 +33,5 @@ flowchart TB
    worker -->|Attack Paths| neo4j
    worker -->|invokes| sdk
    cli --> sdk
    api -. AI tools .-> mcp
    mcp -. context .-> api

    sdk --> providers

(Binary image change: 1 file updated (268 KiB → 348 KiB))
@@ -0,0 +1,29 @@
flowchart LR
    subgraph HOSTS["MCP Hosts"]
        chat["Chat Interfaces<br/>(Claude Desktop, LobeChat)"]
        ide["IDEs and Code Editors<br/>(Claude Code, Cursor)"]
        apps["Other AI Applications<br/>(5ire, custom agents)"]
    end

    subgraph MCP["Prowler MCP Server"]
        app_tools["prowler_app_* tools<br/>(JWT or API key auth)<br/>Findings · Providers · Scans<br/>Resources · Muting · Compliance<br/>Attack Paths"]
        hub_tools["prowler_hub_* tools<br/>(no auth)<br/>Checks Catalog · Check Code<br/>Fixers · Compliance Frameworks"]
        docs_tools["prowler_docs_* tools<br/>(no auth)<br/>Search · Document Retrieval"]
    end

    api["Prowler API<br/>(REST)"]
    hub["hub.prowler.com<br/>(REST)"]
    docs["docs.prowler.com<br/>(Mintlify)"]

    chat -->|STDIO or HTTP| app_tools
    chat -->|STDIO or HTTP| hub_tools
    chat -->|STDIO or HTTP| docs_tools
    ide -->|STDIO or HTTP| app_tools
    ide -->|STDIO or HTTP| hub_tools
    ide -->|STDIO or HTTP| docs_tools
    apps -->|STDIO or HTTP| app_tools
    apps -->|STDIO or HTTP| hub_tools
    apps -->|STDIO or HTTP| docs_tools
    app_tools -->|REST| api
    hub_tools -->|REST| hub
    docs_tools -->|REST| docs

(Binary image changes: 1 file added (371 KiB); 2 files removed (328 KiB, 332 KiB))
@@ -33,6 +33,41 @@ To scan a particular AWS region with Prowler, use:
prowler aws -f/--region eu-west-1 us-east-1
```

### Excluding Specific Regions

To scan all supported AWS regions except a specific subset, use the `--excluded-region` flag:

```console
prowler aws --excluded-region eu-west-1 me-south-1
```

You can also configure the exclusion list with the `PROWLER_AWS_DISALLOWED_REGIONS` environment variable as a comma-separated list:

```console
export PROWLER_AWS_DISALLOWED_REGIONS="eu-west-1,me-south-1"
prowler aws
```

Or with the AWS provider configuration in `config.yaml`:

```yaml
aws:
  disallowed_regions:
    - eu-west-1
    - me-south-1
```

When more than one source is set, precedence is as follows (see the sketch after this list):

1. `--excluded-region`
2. `PROWLER_AWS_DISALLOWED_REGIONS`
3. `aws.disallowed_regions` in `config.yaml`
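
A hypothetical sketch of that precedence expressed as code; the function and parameter names are illustrative, not Prowler internals:

```python
import os


def resolve_disallowed_regions(cli_regions, config_regions):
    """Apply the documented precedence: CLI flag, then env var, then config.yaml."""
    if cli_regions:  # 1. --excluded-region
        return list(cli_regions)
    env_value = os.environ.get("PROWLER_AWS_DISALLOWED_REGIONS", "")
    if env_value:  # 2. comma-separated environment variable
        return [region.strip() for region in env_value.split(",") if region.strip()]
    return list(config_regions or [])  # 3. aws.disallowed_regions in config.yaml
```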

<Note>
For self-hosted App or API-triggered scans, set `PROWLER_AWS_DISALLOWED_REGIONS` in the runtime environment of the backend scan containers such as `api` and `worker`. The `ui` container does not enforce AWS region selection.
</Note>

### AWS Credentials Configuration

For details on configuring AWS credentials, refer to the following [Botocore](https://github.com/boto/botocore) [file](https://github.com/boto/botocore/blob/22a19ea7c4c2c4dd7df4ab8c32733cba0c7597a4/botocore/data/partitions.json).

@@ -0,0 +1,119 @@
---
title: 'Finding Groups'
description: 'Organize and triage security findings by check to reduce noise and prioritize remediation effectively.'
---

import { VersionBadge } from "/snippets/version-badge.mdx"

<VersionBadge version="5.23.0" />

Finding Groups transforms security findings triage by grouping findings by check instead of displaying a flat list. This dramatically reduces noise and enables faster, more effective prioritization.

## Triage Challenges with Flat Finding Lists

A real cloud environment produces thousands of findings per scan. A flat list makes it impossible to triage effectively:

- **Signal buried in noise**: the same misconfiguration repeated across 200 resources shows up as 200 rows, burying the signal in repetitive data
- **Prioritization guesswork**: without grouping, understanding which issues affect the most resources requires manual counting and correlation
- **Tedious muting**: muting a false positive globally requires manually acting on each individual finding across the list
- **Lost context**: when investigating a single resource, related findings are scattered across the same flat list, making it hard to see the full picture

## How Finding Groups Addresses These Challenges

Finding Groups addresses these challenges by intelligently grouping findings by check.

### Grouped View at a Glance

Each row represents a single check title with key information immediately visible:

- **Severity** indicator for quick risk assessment
- **Impacted providers** showing which cloud platforms are affected
- **X of Y impacted resources** counter displaying how many resources fail this check

For example, `Vercel project has the Web Application Firewall enabled` across every affected project collapses to a single row — not one per project. Sort or filter by severity, provider, or status at the group level to triage top-down instead of drowning in per-resource rows.



### Expanding Groups for Details

Expand any group inline to see the failing resources with detailed information:

| Column | Description |
|--------|-------------|
| **UID** | Unique identifier for the resource |
| **Service** | The cloud service the resource belongs to |
| **Region** | Geographic region where the resource is deployed |
| **Severity** | Risk level of the finding |
| **Provider** | Cloud provider (AWS, Azure, GCP, Kubernetes, etc.) |
| **Last Seen** | When the finding was last detected |
| **Failing For** | Duration the resource has been in a failing state |



### Resource Detail Drawer

Select any resource to open the detail drawer with full finding context:

- **Risk**: the security risk associated with this finding
- **Description**: detailed explanation of what was detected
- **Status Extended**: additional status information and context
- **Remediation**: step-by-step guidance to resolve the issue
- **View in Prowler Hub**: direct link to explore the check in Prowler Hub
- **Analyze This Finding With Lighthouse AI**: one-click AI-powered analysis for deeper insights



### Bulk Actions

Bulk-mute an entire group instead of chasing duplicates across the list. This is especially useful for:

- Known false positives that appear across many resources
- Findings in development or test environments
- Accepted risks that have been documented and approved

<Warning>
Muting findings does not resolve underlying security issues. Review each finding carefully before muting to ensure it represents an acceptable risk or has been properly addressed.
</Warning>

## Other Findings for This Resource

Inside the resource detail drawer, the **Other Findings For This Resource** tab lists every finding that hits the same resource — passing, failing, and muted — alongside the one currently being reviewed.



### Why This Matters

When reviewing "WAF not enabled" on a Vercel project, the tab immediately shows:

- Skew protection status
- Rate limiting configuration
- IP blocking settings
- Custom firewall rules
- Password protection findings

All for that same project, without navigating back to the main list and filtering by resource UID.

### Complete Context Within the Drawer

Pair the Other Findings tab with:

- **Scans tab**: scan history for this resource
- **Events tab**: changes and events over time

This provides full context without leaving the drawer.

## Best Practices

1. **Start with high severity groups**: focus on critical and high severity groups first for maximum impact.
2. **Use filters strategically**: filter by provider or status at the group level to narrow the triage scope.
3. **Leverage bulk mute**: when a finding represents a confirmed false positive, mute the entire group at once.
4. **Check related findings**: review the Other Findings tab to understand the full security posture of a resource.
5. **Track failure duration**: use the "Failing For" column to prioritize long-standing issues that may indicate systemic problems.

## Getting Started

1. Navigate to the **Findings** section in Prowler Cloud/App.
2. Toggle to the **Grouped View** to see findings organized by check.
3. Select any group row to expand and see affected resources.
4. Select a resource to open the detail drawer with full context.
5. Use the **Other Findings For This Resource** tab to see all findings for that resource.