mirror of https://github.com/prowler-cloud/prowler.git
synced 2026-05-14 00:02:47 +00:00

Compare commits (3 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 173659ae0b | |
| | 37abe6b47e | |
| | 7b2ad97317 | |
@@ -26,18 +26,10 @@ runs:
      if: github.event_name == 'pull_request' && github.base_ref == 'master' && github.repository == 'prowler-cloud/prowler'
      shell: bash
      working-directory: ${{ inputs.working-directory }}
-     env:
-       HEAD_REPO: ${{ github.event.pull_request.head.repo.full_name }}
      run: |
        BRANCH_NAME="${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}"
-       UPSTREAM="prowler-cloud/prowler"
-       if [ "$HEAD_REPO" != "$UPSTREAM" ]; then
-         echo "Fork PR detected (${HEAD_REPO}), rewriting VCS URL to fork"
-         sed -i "s|git+https://github.com/prowler-cloud/prowler\([^@]*\)@master|git+https://github.com/${HEAD_REPO}\1@$BRANCH_NAME|g" pyproject.toml
-       else
-         echo "Same-repo PR, using branch: $BRANCH_NAME"
-         sed -i "s|\(git+https://github.com/prowler-cloud/prowler[^@]*\)@master|\1@$BRANCH_NAME|g" pyproject.toml
-       fi
+       echo "Using branch: $BRANCH_NAME"
+       sed -i "s|\(git+https://github.com/prowler-cloud/prowler[^@]*\)@master|\1@$BRANCH_NAME|g" pyproject.toml

    - name: Install poetry
      shell: bash
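For context on the branch-rewrite logic above: the surviving sed expression swaps the `@master` ref on the pinned prowler VCS URL for the PR branch. A minimal standalone sketch on a sample line (the dependency string below is illustrative, not taken from the repo):

```bash
# Demonstrate the VCS-URL rewrite on a sample pyproject.toml line.
echo 'prowler = { git = "git+https://github.com/prowler-cloud/prowler.git@master" }' > /tmp/pyproject-sample.toml

BRANCH_NAME="my-feature-branch"   # in CI this is ${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}
# \(...\) captures everything up to the first "@" (including ".git"), then "@master" is replaced.
sed -i "s|\(git+https://github.com/prowler-cloud/prowler[^@]*\)@master|\1@$BRANCH_NAME|g" /tmp/pyproject-sample.toml

cat /tmp/pyproject-sample.toml
# -> prowler = { git = "git+https://github.com/prowler-cloud/prowler.git@my-feature-branch" }
```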
@@ -28,7 +28,7 @@ jobs:
      current_api_version: ${{ steps.get_api_version.outputs.current_api_version }}
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -80,7 +80,7 @@ jobs:
      pull-requests: write
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -118,7 +118,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for next API minor version to master
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -137,7 +137,7 @@ jobs:
          By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.

      - name: Checkout version branch
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}
          persist-credentials: false

@@ -177,7 +177,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for first API patch version to version branch
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -205,7 +205,7 @@ jobs:
      pull-requests: write
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -255,7 +255,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for next API patch version to version branch
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -33,14 +33,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for API changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            api/**

@@ -42,17 +42,17 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

      - name: Initialize CodeQL
-       uses: github/codeql-action/init@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
+       uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
        with:
          languages: ${{ matrix.language }}
          config-file: ./.github/codeql/api-codeql-config.yml

      - name: Perform CodeQL Analysis
-       uses: github/codeql-action/analyze@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
+       uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
        with:
          category: '/language:${{ matrix.language }}'

@@ -57,7 +57,7 @@ jobs:
      message-ts: ${{ steps.slack-notification.outputs.ts }}
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -95,18 +95,12 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

-     - name: Pin prowler SDK to latest master commit
-       if: github.event_name == 'push'
-       run: |
-         LATEST_SHA=$(git ls-remote https://github.com/prowler-cloud/prowler.git refs/heads/master | cut -f1)
-         sed -i "s|prowler-cloud/prowler.git@master|prowler-cloud/prowler.git@${LATEST_SHA}|" api/pyproject.toml
-
      - name: Login to DockerHub
-       uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
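The pin step above resolves `master` to a concrete commit with `git ls-remote`, whose output is `<sha><TAB><ref>`, so `cut -f1` (tab-delimited by default) keeps just the SHA. A quick standalone sketch:

```bash
# Resolve the current tip of master to a 40-hex commit id.
LATEST_SHA=$(git ls-remote https://github.com/prowler-cloud/prowler.git refs/heads/master | cut -f1)
echo "prowler master is at ${LATEST_SHA}"
```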
@@ -117,7 +111,7 @@ jobs:
      - name: Build and push API container for ${{ matrix.arch }}
        id: container-push
        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
-       uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
+       uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true

@@ -135,7 +129,7 @@ jobs:

    steps:
      - name: Login to DockerHub
-       uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

@@ -167,7 +161,7 @@ jobs:

      - name: Install regctl
        if: always()
-       uses: regclient/actions/regctl-installer@da9319db8e44e8b062b3a147e1dfb2f574d41a03 # main
+       uses: regclient/actions/regctl-installer@f61d18f46c86af724a9c804cb9ff2a6fec741c7c # main

      - name: Cleanup intermediate architecture tags
        if: always()
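The cleanup step presumably deletes the per-architecture tags once the final multi-arch manifest exists. A hedged sketch with regctl (the image and tag names are illustrative assumptions, not taken from the workflow):

```bash
# Illustrative only: remove a per-arch intermediate tag after the multi-arch
# manifest has been assembled. Image and tag are assumptions, not from the workflow.
regctl tag rm docker.io/example/prowler-api:latest-amd64
```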
@@ -186,7 +180,7 @@ jobs:
      timeout-minutes: 5
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -28,14 +28,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check if Dockerfile changed
        id: dockerfile-changed
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: api/Dockerfile

@@ -66,14 +66,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for API changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: api/**
          files_ignore: |

@@ -88,7 +88,7 @@ jobs:

      - name: Build container for ${{ matrix.arch }}
        if: steps.check-changes.outputs.any_changed == 'true'
-       uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
+       uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.API_WORKING_DIR }}
          push: false

@@ -33,14 +33,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for API changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            api/**

@@ -64,9 +64,8 @@ jobs:

      - name: Safety
        if: steps.check-changes.outputs.any_changed == 'true'
-       run: poetry run safety check --ignore 79023,79027,86217
+       run: poetry run safety check --ignore 79023,79027
        # TODO: 79023 & 79027 knack ReDoS until `azure-cli-core` (via `cartography`) allows `knack` >=0.13.0
-       # TODO: 86217 because `alibabacloud-tea-openapi == 0.4.3` don't let us upgrade `cryptography >= 46.0.0`

      - name: Vulture
        if: steps.check-changes.outputs.any_changed == 'true'

@@ -73,14 +73,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for API changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            api/**

@@ -34,7 +34,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -28,7 +28,7 @@ jobs:
      current_docs_version: ${{ steps.get_docs_version.outputs.current_docs_version }}
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -80,7 +80,7 @@ jobs:
      pull-requests: write
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -114,7 +114,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for documentation update to master
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -137,7 +137,7 @@ jobs:
          By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.

      - name: Checkout version branch
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}
          persist-credentials: false

@@ -174,7 +174,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for documentation update to version branch
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -205,7 +205,7 @@ jobs:
      pull-requests: write
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -245,7 +245,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for documentation update to version branch
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -23,7 +23,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0
          persist-credentials: false

@@ -56,7 +56,7 @@ jobs:
      message-ts: ${{ steps.slack-notification.outputs.ts }}
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -93,12 +93,12 @@ jobs:
      packages: write
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

      - name: Login to DockerHub
-       uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

@@ -109,7 +109,7 @@ jobs:
      - name: Build and push MCP container for ${{ matrix.arch }}
        id: container-push
        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
-       uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
+       uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true

@@ -135,7 +135,7 @@ jobs:

    steps:
      - name: Login to DockerHub
-       uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

@@ -186,7 +186,7 @@ jobs:
      timeout-minutes: 5
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -28,14 +28,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check if Dockerfile changed
        id: dockerfile-changed
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: mcp_server/Dockerfile

@@ -65,14 +65,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for MCP changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: mcp_server/**
          files_ignore: |

@@ -85,7 +85,7 @@ jobs:

      - name: Build MCP container for ${{ matrix.arch }}
        if: steps.check-changes.outputs.any_changed == 'true'
-       uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
+       uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.MCP_WORKING_DIR }}
          push: false

@@ -60,7 +60,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -70,7 +70,7 @@ jobs:
          enable-cache: false

      - name: Set up Python ${{ env.PYTHON_VERSION }}
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}

@@ -29,7 +29,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0
          # zizmor: ignore[artipacked]

@@ -37,7 +37,7 @@ jobs:

      - name: Get changed files
        id: changed-files
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            api/**

@@ -26,7 +26,7 @@ jobs:

    steps:
      - name: Checkout PR head
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0

@@ -34,7 +34,7 @@ jobs:

      - name: Get changed files
        id: changed-files
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: '**'

@@ -27,14 +27,14 @@ jobs:
      pull-requests: write
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          fetch-depth: 0
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          persist-credentials: false

      - name: Set up Python
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: '3.12'

@@ -345,7 +345,7 @@ jobs:

      - name: Create PR for API dependency update
        if: ${{ env.PATCH_VERSION == '0' }}
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          commit-message: 'chore(api): update prowler dependency to ${{ env.BRANCH_NAME }} for release ${{ env.PROWLER_VERSION }}'

@@ -67,7 +67,7 @@ jobs:
      pull-requests: write
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -96,7 +96,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for next minor version to master
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -115,7 +115,7 @@ jobs:
          By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.

      - name: Checkout version branch
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}
          persist-credentials: false

@@ -148,7 +148,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for first patch version to version branch
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -176,7 +176,7 @@ jobs:
      pull-requests: write
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -211,7 +211,7 @@ jobs:
          git --no-pager diff

      - name: Create PR for next patch version to version branch
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}

@@ -20,7 +20,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -31,14 +31,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for SDK changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: ./**
          files_ignore: |

@@ -67,7 +67,7 @@ jobs:

      - name: Set up Python ${{ matrix.python-version }}
        if: steps.check-changes.outputs.any_changed == 'true'
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'poetry'

@@ -49,17 +49,17 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

      - name: Initialize CodeQL
-       uses: github/codeql-action/init@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
+       uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
        with:
          languages: ${{ matrix.language }}
          config-file: ./.github/codeql/sdk-codeql-config.yml

      - name: Perform CodeQL Analysis
-       uses: github/codeql-action/analyze@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
+       uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
        with:
          category: '/language:${{ matrix.language }}'

@@ -61,12 +61,12 @@ jobs:
      stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

      - name: Set up Python ${{ env.PYTHON_VERSION }}
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}

@@ -117,7 +117,7 @@ jobs:
      message-ts: ${{ steps.slack-notification.outputs.ts }}
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -155,18 +155,18 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

      - name: Login to DockerHub
-       uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to Public ECR
-       uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: public.ecr.aws
          username: ${{ secrets.PUBLIC_ECR_AWS_ACCESS_KEY_ID }}

@@ -180,7 +180,7 @@ jobs:
      - name: Build and push SDK container for ${{ matrix.arch }}
        id: container-push
        if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
-       uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
+       uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: .
          file: ${{ env.DOCKERFILE_PATH }}

@@ -199,13 +199,13 @@ jobs:

    steps:
      - name: Login to DockerHub
-       uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to Public ECR
-       uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
+       uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: public.ecr.aws
          username: ${{ secrets.PUBLIC_ECR_AWS_ACCESS_KEY_ID }}

@@ -266,7 +266,7 @@ jobs:
      timeout-minutes: 5
    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -27,14 +27,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check if Dockerfile changed
        id: dockerfile-changed
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: Dockerfile

@@ -65,14 +65,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for SDK changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: ./**
          files_ignore: |

@@ -101,7 +101,7 @@ jobs:

      - name: Build SDK container for ${{ matrix.arch }}
        if: steps.check-changes.outputs.any_changed == 'true'
-       uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
+       uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: .
          push: false

@@ -59,7 +59,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -67,7 +67,7 @@ jobs:
        run: pipx install poetry==2.1.1

      - name: Set up Python ${{ env.PYTHON_VERSION }}
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}

@@ -92,7 +92,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -100,7 +100,7 @@ jobs:
        run: pipx install poetry==2.1.1

      - name: Set up Python ${{ env.PYTHON_VERSION }}
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}

@@ -25,13 +25,13 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          ref: 'master'
          persist-credentials: false

      - name: Set up Python ${{ env.PYTHON_VERSION }}
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip'

@@ -40,7 +40,7 @@ jobs:
        run: pip install boto3

      - name: Configure AWS credentials
-       uses: aws-actions/configure-aws-credentials@8df5847569e6427dd6c4fb1cf565c83acfa8afa7 # v6.0.0
+       uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.DEV_IAM_ROLE_ARN }}

@@ -51,7 +51,7 @@ jobs:

      - name: Create pull request
        id: create-pr
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          author: 'prowler-bot <179230569+prowler-bot@users.noreply.github.com>'

@@ -23,13 +23,13 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          ref: 'master'
          persist-credentials: false

      - name: Set up Python ${{ env.PYTHON_VERSION }}
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip'

@@ -48,7 +48,7 @@ jobs:

      - name: Create pull request
        id: create-pr
-       uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
+       uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          author: 'prowler-bot <179230569+prowler-bot@users.noreply.github.com>'

@@ -72,13 +72,12 @@ jobs:
            This PR updates the `OCI_COMMERCIAL_REGIONS` dictionary in `prowler/providers/oraclecloud/config.py` with the latest regions fetched from the OCI Identity API (`list_regions()`).

            - Government regions (`OCI_GOVERNMENT_REGIONS`) are preserved unchanged
-           - DOD regions (`OCI_US_DOD_REGIONS`) are preserved unchanged
            - Region display names are mapped from Oracle's official documentation

            ### Checklist

            - [x] This is an automated update from OCI official sources
-           - [x] Government regions (us-langley-1, us-luke-1) and DOD regions (us-gov-ashburn-1, us-gov-phoenix-1, us-gov-chicago-1) are preserved
+           - [x] Government regions (us-langley-1, us-luke-1) preserved
            - [x] No manual review of region data required

            ### License
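For reference, the region data this automation consumes can be reproduced with the OCI CLI; this is only a sketch of an equivalent query, not the workflow's actual script:

```bash
# Sketch: list regions from the OCI Identity API via the OCI CLI,
# equivalent in spirit to the IdentityClient.list_regions() call named in the PR body.
oci iam region list --output json
# -> {"data": [{"key": "IAD", "name": "us-ashburn-1"}, ...]}
```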
@@ -24,14 +24,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for SDK changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files:
            ./**

@@ -62,7 +62,7 @@ jobs:

      - name: Set up Python 3.12
        if: steps.check-changes.outputs.any_changed == 'true'
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: '3.12'
          cache: 'poetry'

@@ -31,14 +31,14 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for SDK changes
        id: check-changes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: ./**
          files_ignore: |

@@ -67,7 +67,7 @@ jobs:

      - name: Set up Python ${{ matrix.python-version }}
        if: steps.check-changes.outputs.any_changed == 'true'
-       uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+       uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'poetry'

@@ -80,7 +80,7 @@ jobs:
      - name: Check if AWS files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-aws
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/aws/**

@@ -210,7 +210,7 @@ jobs:
      - name: Check if Azure files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-azure
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/azure/**

@@ -234,7 +234,7 @@ jobs:
      - name: Check if GCP files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-gcp
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/gcp/**

@@ -258,7 +258,7 @@ jobs:
      - name: Check if Kubernetes files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-kubernetes
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/kubernetes/**

@@ -282,7 +282,7 @@ jobs:
      - name: Check if GitHub files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-github
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/github/**

@@ -306,7 +306,7 @@ jobs:
      - name: Check if NHN files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-nhn
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/nhn/**

@@ -330,7 +330,7 @@ jobs:
      - name: Check if M365 files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-m365
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/m365/**

@@ -354,7 +354,7 @@ jobs:
      - name: Check if IaC files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-iac
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/iac/**

@@ -378,7 +378,7 @@ jobs:
      - name: Check if MongoDB Atlas files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-mongodbatlas
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/mongodbatlas/**

@@ -402,7 +402,7 @@ jobs:
      - name: Check if OCI files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-oraclecloud
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/oraclecloud/**

@@ -426,7 +426,7 @@ jobs:
      - name: Check if OpenStack files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-openstack
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/openstack/**

@@ -450,7 +450,7 @@ jobs:
      - name: Check if Google Workspace files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-googleworkspace
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/**/googleworkspace/**

@@ -474,7 +474,7 @@ jobs:
      - name: Check if Lib files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-lib
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/lib/**

@@ -498,7 +498,7 @@ jobs:
      - name: Check if Config files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-config
-       uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
+       uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ./prowler/config/**
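All the per-provider steps above share one gating pattern: run a provider's checks only when files under that provider's tree changed. A rough local equivalent (the base ref and test path are illustrative assumptions):

```bash
# Rough local equivalent of the per-provider gating; base ref and test path
# are illustrative assumptions.
CHANGED=$(git diff --name-only origin/master...HEAD)
if echo "$CHANGED" | grep -q '^prowler/providers/aws/'; then
  echo "AWS provider files changed; running AWS tests"
  poetry run pytest tests/providers/aws
fi
```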
@@ -48,17 +48,17 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
# zizmor: ignore[artipacked]
|
||||
persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
|
||||
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
||||
uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
|
||||
with:
|
||||
python-version: '3.12'
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -95,7 +95,7 @@ jobs:
|
||||
git --no-pager diff
|
||||
|
||||
- name: Create PR for next minor version to master
|
||||
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
|
||||
uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
|
||||
with:
|
||||
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
|
||||
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
|
||||
@@ -117,7 +117,7 @@ jobs:
|
||||
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
|
||||
|
||||
- name: Checkout version branch
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}
|
||||
persist-credentials: false
|
||||
@@ -149,7 +149,7 @@ jobs:
|
||||
git --no-pager diff
|
||||
|
||||
- name: Create PR for first patch version to version branch
|
||||
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
|
||||
uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
|
||||
with:
|
||||
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
|
||||
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
|
||||
@@ -180,7 +180,7 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -214,7 +214,7 @@ jobs:
|
||||
git --no-pager diff
|
||||
|
||||
- name: Create PR for next patch version to version branch
|
||||
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
|
||||
uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
|
||||
with:
|
||||
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
|
||||
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
|
||||
|
||||
@@ -45,17 +45,17 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
|
||||
uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
config-file: ./.github/codeql/ui-codeql-config.yml
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@89a39a4e59826350b863aa6b6252a07ad50cf83e # v4.32.4
|
||||
uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
|
||||
with:
|
||||
category: '/language:${{ matrix.language }}'
|
||||
|
||||
@@ -59,7 +59,7 @@ jobs:
|
||||
message-ts: ${{ steps.slack-notification.outputs.ts }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -97,12 +97,12 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
@@ -113,7 +113,7 @@ jobs:
|
||||
- name: Build and push UI container for ${{ matrix.arch }}
|
||||
id: container-push
|
||||
if: github.event_name == 'push' || github.event_name == 'release' || github.event_name == 'workflow_dispatch'
|
||||
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
with:
|
||||
context: ${{ env.WORKING_DIRECTORY }}
|
||||
build-args: |
|
||||
@@ -134,7 +134,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
@@ -185,7 +185,7 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
@@ -28,14 +28,14 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
# zizmor: ignore[artipacked]
|
||||
persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch
|
||||
|
||||
- name: Check if Dockerfile changed
|
||||
id: dockerfile-changed
|
||||
uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
|
||||
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
|
||||
with:
|
||||
files: ui/Dockerfile
|
||||
|
||||
@@ -66,14 +66,14 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
# zizmor: ignore[artipacked]
|
||||
persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch
|
||||
|
||||
- name: Check for UI changes
|
||||
id: check-changes
|
||||
uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
|
||||
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
|
||||
with:
|
||||
files: ui/**
|
||||
files_ignore: |
|
||||
@@ -87,7 +87,7 @@ jobs:

      - name: Build UI container for ${{ matrix.arch }}
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.UI_WORKING_DIR }}
          target: prod

@@ -78,7 +78,7 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false

@@ -152,7 +152,7 @@ jobs:
          '

      - name: Setup Node.js
        uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
        uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
        with:
          node-version: '24.13.0'

@@ -166,7 +166,7 @@ jobs:
        run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV

      - name: Setup pnpm and Next.js cache
        uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
        uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
        with:
          path: |
            ${{ env.STORE_PATH }}
@@ -186,7 +186,7 @@ jobs:
        run: pnpm run build

      - name: Cache Playwright browsers
        uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
        uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
        id: playwright-cache
        with:
          path: ~/.cache/ms-playwright

@@ -30,14 +30,14 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Check for UI changes
        id: check-changes
        uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ui/**
@@ -50,7 +50,7 @@ jobs:
      - name: Get changed source files for targeted tests
        id: changed-source
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ui/**/*.ts
@@ -66,7 +66,7 @@ jobs:
      - name: Check for critical path changes (run all tests)
        id: critical-changes
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
        with:
          files: |
            ui/lib/**
@@ -78,7 +78,7 @@ jobs:

      - name: Setup Node.js ${{ env.NODE_VERSION }}
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
        uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
        with:
          node-version: ${{ env.NODE_VERSION }}

@@ -96,7 +96,7 @@ jobs:

      - name: Setup pnpm and Next.js cache
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
        uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
        with:
          path: |
            ${{ env.STORE_PATH }}

@@ -163,6 +163,3 @@ GEMINI.md
.codex/skills
.github/skills
.gemini/skills

# Claude Code
.claude/*

@@ -22,13 +22,6 @@ repos:
        args: [--autofix]
        files: pyproject.toml

  ## GITHUB ACTIONS
  - repo: https://github.com/zizmorcore/zizmor-pre-commit
    rev: v1.6.0
    hooks:
      - id: zizmor
        files: ^\.github/

  ## BASH
  - repo: https://github.com/koalaman/shellcheck-precommit
    rev: v0.10.0
@@ -127,8 +120,7 @@ repos:
        description: "Safety is a tool that checks your installed dependencies for known security vulnerabilities"
        # TODO: Botocore needs urllib3 1.X so we need to ignore these vulnerabilities 77744,77745. Remove this once we upgrade to urllib3 2.X
        # TODO: 79023 & 79027 knack ReDoS until `azure-cli-core` (via `cartography`) allows `knack` >=0.13.0
        # TODO: 86217 because `alibabacloud-tea-openapi == 0.4.3` don't let us upgrade `cryptography >= 46.0.0`
        entry: bash -c 'safety check --ignore 70612,66963,74429,76352,76353,77744,77745,79023,79027,86217'
        entry: bash -c 'safety check --ignore 70612,66963,74429,76352,76353,77744,77745,79023,79027'
        language: system

      - id: vulture

@@ -46,8 +46,6 @@ Use these skills for detailed patterns on-demand:
| `prowler-commit` | Professional commits (conventional-commits) | [SKILL.md](skills/prowler-commit/SKILL.md) |
| `prowler-pr` | Pull request conventions | [SKILL.md](skills/prowler-pr/SKILL.md) |
| `prowler-docs` | Documentation style guide | [SKILL.md](skills/prowler-docs/SKILL.md) |
| `django-migration-psql` | Django migration best practices for PostgreSQL | [SKILL.md](skills/django-migration-psql/SKILL.md) |
| `postgresql-indexing` | PostgreSQL indexing, EXPLAIN, monitoring, maintenance | [SKILL.md](skills/postgresql-indexing/SKILL.md) |
| `prowler-attack-paths-query` | Create Attack Paths openCypher queries | [SKILL.md](skills/prowler-attack-paths-query/SKILL.md) |
| `gh-aw` | GitHub Agentic Workflows (gh-aw) | [SKILL.md](skills/gh-aw/SKILL.md) |
| `skill-creator` | Create new AI agent skills | [SKILL.md](skills/skill-creator/SKILL.md) |
@@ -87,15 +85,15 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Fixing bug | `tdd` |
| General Prowler development questions | `prowler` |
| Implementing JSON:API endpoints | `django-drf` |
| Implementing feature | `tdd` |
| Importing Copilot Custom Agents into workflows | `gh-aw` |
| Implementing feature | `tdd` |
| Inspect PR CI checks and gates (.github/workflows/*) | `prowler-ci` |
| Inspect PR CI workflows (.github/workflows/*): conventional-commit, pr-check-changelog, pr-conflict-checker, labeler | `prowler-pr` |
| Mapping checks to compliance controls | `prowler-compliance` |
| Mocking AWS with moto in tests | `prowler-test-sdk` |
| Modifying API responses | `jsonapi` |
| Modifying component | `tdd` |
| Modifying gh-aw workflow frontmatter or safe-outputs | `gh-aw` |
| Modifying component | `tdd` |
| Refactoring code | `tdd` |
| Regenerate AGENTS.md Auto-invoke tables (sync.sh) | `skill-sync` |
| Review PR requirements: template, title conventions, changelog gate | `prowler-pr` |

@@ -4,8 +4,6 @@
> - [`prowler-api`](../skills/prowler-api/SKILL.md) - Models, Serializers, Views, RLS patterns
> - [`prowler-test-api`](../skills/prowler-test-api/SKILL.md) - Testing patterns (pytest-django)
> - [`prowler-attack-paths-query`](../skills/prowler-attack-paths-query/SKILL.md) - Attack Paths openCypher queries
> - [`django-migration-psql`](../skills/django-migration-psql/SKILL.md) - Migration best practices for PostgreSQL
> - [`postgresql-indexing`](../skills/postgresql-indexing/SKILL.md) - PostgreSQL indexing, EXPLAIN, monitoring, maintenance
> - [`django-drf`](../skills/django-drf/SKILL.md) - Generic DRF patterns
> - [`jsonapi`](../skills/jsonapi/SKILL.md) - Strict JSON:API v1.1 spec compliance
> - [`pytest`](../skills/pytest/SKILL.md) - Generic pytest patterns
@@ -18,20 +16,14 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
|--------|-------|
| Add changelog entry for a PR or feature | `prowler-changelog` |
| Adding DRF pagination or permissions | `django-drf` |
| Adding indexes or constraints to database tables | `django-migration-psql` |
| Adding privilege escalation detection queries | `prowler-attack-paths-query` |
| Analyzing query performance with EXPLAIN | `postgresql-indexing` |
| Committing changes | `prowler-commit` |
| Create PR that requires changelog entry | `prowler-changelog` |
| Creating API endpoints | `jsonapi` |
| Creating Attack Paths queries | `prowler-attack-paths-query` |
| Creating ViewSets, serializers, or filters in api/ | `django-drf` |
| Creating a git commit | `prowler-commit` |
| Creating or modifying PostgreSQL indexes | `postgresql-indexing` |
| Creating or reviewing Django migrations | `django-migration-psql` |
| Creating/modifying models, views, serializers | `prowler-api` |
| Debugging slow queries or missing indexes | `postgresql-indexing` |
| Dropping or reindexing PostgreSQL indexes | `postgresql-indexing` |
| Fixing bug | `tdd` |
| Implementing JSON:API endpoints | `django-drf` |
| Implementing feature | `tdd` |
@@ -40,14 +32,12 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Refactoring code | `tdd` |
| Review changelog format and conventions | `prowler-changelog` |
| Reviewing JSON:API compliance | `jsonapi` |
| Running makemigrations or pgmakemigrations | `django-migration-psql` |
| Testing RLS tenant isolation | `prowler-test-api` |
| Update CHANGELOG.md in any component | `prowler-changelog` |
| Updating existing Attack Paths queries | `prowler-attack-paths-query` |
| Working on task | `tdd` |
| Writing Prowler API tests | `prowler-test-api` |
| Writing Python tests with pytest | `pytest` |
| Writing data backfill or data migration | `django-migration-psql` |

---
@@ -2,67 +2,20 @@

All notable changes to the **Prowler API** are documented in this file.

## [1.23.0] (Prowler v5.22.0)

### 🚀 Added

- Finding groups support `check_title` substring filtering [(#10377)](https://github.com/prowler-cloud/prowler/pull/10377)

### 🐞 Fixed

- Finding groups latest endpoint now aggregates the latest snapshot per provider before check-level totals, keeping impacted resources aligned across providers [(#10419)](https://github.com/prowler-cloud/prowler/pull/10419)
- Mute rule creation now triggers finding-group summary re-aggregation after historical muting, keeping stats in sync after mute operations [(#10419)](https://github.com/prowler-cloud/prowler/pull/10419)
- Attack Paths: Deduplicate nodes before ProwlerFinding lookup in Attack Paths Cypher queries, reducing execution time [(#10424)](https://github.com/prowler-cloud/prowler/pull/10424)

### 🔐 Security

- Replace stdlib XML parser with `defusedxml` in SAML metadata parsing to prevent XML bomb (billion laughs) DoS attacks [(#10165)](https://github.com/prowler-cloud/prowler/pull/10165)
- Bump `flask` to 3.1.3 (CVE-2026-27205) and `werkzeug` to 3.1.6 (CVE-2026-27199) [(#10430)](https://github.com/prowler-cloud/prowler/pull/10430)

---

## [1.22.1] (Prowler v5.21.1)

### 🐞 Fixed

- Threat score aggregation query to eliminate unnecessary JOINs and `COUNT(DISTINCT)` overhead [(#10394)](https://github.com/prowler-cloud/prowler/pull/10394)

---

## [1.22.0] (Prowler v5.21.0)

### 🚀 Added

- `CORS_ALLOWED_ORIGINS` configurable via environment variable [(#10355)](https://github.com/prowler-cloud/prowler/pull/10355)
- Attack Paths: Tenant and provider related labels to the nodes so they can be easily filtered on custom queries [(#10308)](https://github.com/prowler-cloud/prowler/pull/10308)

### 🔄 Changed

- Attack Paths: Complete migration to private graph labels and properties, removing deprecated dual-write support [(#10268)](https://github.com/prowler-cloud/prowler/pull/10268)
- Attack Paths: Reduce sync and findings memory usage with smaller batches, cursor iteration, and sequential sessions [(#10359)](https://github.com/prowler-cloud/prowler/pull/10359)

### 🐞 Fixed

- Attack Paths: Recover `graph_data_ready` flag when scan fails during graph swap, preventing query endpoints from staying blocked until the next successful scan [(#10354)](https://github.com/prowler-cloud/prowler/pull/10354)

### 🔐 Security

- Use `psycopg2.sql` to safely compose DDL in `PostgresEnumMigration`, preventing SQL injection via f-string interpolation [(#10166)](https://github.com/prowler-cloud/prowler/pull/10166)
- Replace stdlib XML parser with `defusedxml` in SAML metadata parsing to prevent XML bomb (billion laughs) DoS attacks [(#10165)](https://github.com/prowler-cloud/prowler/pull/10165)

---

## [1.21.0] (Prowler v5.20.0)
## [1.21.0] (Prowler UNRELEASED)

### 🔄 Changed

- Attack Paths: Migrate network exposure queries from APOC to standard openCypher for Neo4j and Neptune compatibility [(#10266)](https://github.com/prowler-cloud/prowler/pull/10266)
- `POST /api/v1/providers` returns `409 Conflict` if the provider already exists [(#10293)](https://github.com/prowler-cloud/prowler/pull/10293)

---

## [1.20.1] (Prowler UNRELEASED)

### 🐞 Fixed

- Attack Paths: Security hardening for custom query endpoint (Cypher blocklist, input validation, rate limiting, Helm lockdown) [(#10238)](https://github.com/prowler-cloud/prowler/pull/10238)
- Attack Paths: Missing logging for query execution and exception details in scan error handling [(#10269)](https://github.com/prowler-cloud/prowler/pull/10269)
- Attack Paths: Add missing logging for query execution and exception details in scan error handling [(#10269)](https://github.com/prowler-cloud/prowler/pull/10269)
- Attack Paths: Upgrade Cartography from 0.129.0 to 0.132.0, fixing `exposed_internet` not set on ELB/ELBv2 nodes [(#10272)](https://github.com/prowler-cloud/prowler/pull/10272)

---
File diff suppressed because it is too large
@@ -22,10 +22,9 @@ dependencies = [
    "drf-nested-routers (>=0.94.1,<1.0.0)",
    "drf-spectacular==0.27.2",
    "drf-spectacular-jsonapi==0.5.1",
    "defusedxml==0.7.1",
    "gunicorn==23.0.0",
    "lxml==5.3.2",
    "prowler @ git+https://github.com/prowler-cloud/prowler.git@v5.22",
    "prowler @ git+https://github.com/prowler-cloud/prowler.git@master",
    "psycopg2-binary==2.9.9",
    "pytest-celery[redis] (>=1.0.1,<2.0.0)",
    "sentry-sdk[django] (>=2.20.0,<3.0.0)",
@@ -50,7 +49,7 @@ name = "prowler-api"
package-mode = false
# Needed for the SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.23.0"
version = "1.21.0"

[project.scripts]
celery = "src.backend.config.settings.celery"
@@ -1,21 +1,24 @@
import atexit
import logging
import threading

from typing import Any

from contextlib import contextmanager
from typing import Any, Iterator
from typing import Iterator
from uuid import UUID

import neo4j
import neo4j.exceptions
from config.env import env

from django.conf import settings
from tasks.jobs.attack_paths.config import (
    BATCH_SIZE,
    PROVIDER_ID_PROPERTY,
    PROVIDER_RESOURCE_LABEL,
)

from api.attack_paths.retryable_session import RetryableSession
from config.env import env
from tasks.jobs.attack_paths.config import (
    BATCH_SIZE,
    DEPRECATED_PROVIDER_RESOURCE_LABEL,
)

# Without this Celery goes crazy with Neo4j logging
logging.getLogger("neo4j").setLevel(logging.ERROR)
@@ -32,7 +35,6 @@ READ_EXCEPTION_CODES = [
    "Neo.ClientError.Statement.AccessMode",
    "Neo.ClientError.Procedure.ProcedureNotFound",
]
CLIENT_STATEMENT_EXCEPTION_PREFIX = "Neo.ClientError.Statement."

# Module-level process-wide driver singleton
_driver: neo4j.Driver | None = None
@@ -106,7 +108,6 @@ def get_session(
    except neo4j.exceptions.Neo4jError as exc:
        if (
            default_access_mode == neo4j.READ_ACCESS
            and exc.code
            and exc.code in READ_EXCEPTION_CODES
        ):
            message = "Read query not allowed"
@@ -114,10 +115,6 @@ def get_session(
            raise WriteQueryNotAllowedException(message=message, code=code)

        message = exc.message if exc.message is not None else str(exc)

        if exc.code and exc.code.startswith(CLIENT_STATEMENT_EXCEPTION_PREFIX):
            raise ClientStatementException(message=message, code=exc.code)

        raise GraphDatabaseQueryException(message=message, code=exc.code)

    finally:
@@ -175,7 +172,7 @@ def drop_subgraph(database: str, provider_id: str) -> int:
        while deleted_count > 0:
            result = session.run(
                f"""
                MATCH (n:{PROVIDER_RESOURCE_LABEL} {{{PROVIDER_ID_PROPERTY}: $provider_id}})
                MATCH (n:{DEPRECATED_PROVIDER_RESOURCE_LABEL} {{provider_id: $provider_id}})
                WITH n LIMIT $batch_size
                DETACH DELETE n
                RETURN COUNT(n) AS deleted_nodes_count
@@ -193,29 +190,6 @@ def drop_subgraph(database: str, provider_id: str) -> int:
    return deleted_nodes
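
For reference, the batched DETACH DELETE loop above is the core of this hunk. A minimal, self-contained sketch of the same pattern, assuming a neo4j session object and a placeholder label/property pair (the real names come from constants in tasks.jobs.attack_paths.config):

def drop_subgraph_batched(session, provider_id: str, batch_size: int = 1000) -> int:
    # Delete in small batches so a single huge transaction cannot
    # exhaust server memory on large provider subgraphs.
    deleted_total = 0
    deleted_count = 1  # prime the loop
    while deleted_count > 0:
        result = session.run(
            """
            MATCH (n:ProviderResource {provider_id: $provider_id})
            WITH n LIMIT $batch_size
            DETACH DELETE n
            RETURN COUNT(n) AS deleted_nodes_count
            """,
            {"provider_id": provider_id, "batch_size": batch_size},
        )
        deleted_count = result.single()["deleted_nodes_count"]
        deleted_total += deleted_count
    return deleted_total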


def has_provider_data(database: str, provider_id: str) -> bool:
    """
    Check if any ProviderResource node exists for this provider.

    Returns `False` if the database doesn't exist.
    """
    query = (
        f"MATCH (n:{PROVIDER_RESOURCE_LABEL} "
        f"{{{PROVIDER_ID_PROPERTY}: $provider_id}}) "
        "RETURN 1 LIMIT 1"
    )

    try:
        with get_session(database, default_access_mode=neo4j.READ_ACCESS) as session:
            result = session.run(query, {"provider_id": provider_id})
            return result.single() is not None

    except GraphDatabaseQueryException as exc:
        if exc.code == "Neo.ClientError.Database.DatabaseNotFound":
            return False
        raise


def clear_cache(database: str) -> None:
    query = "CALL db.clearQueryCaches()"

@@ -253,7 +227,3 @@ class GraphDatabaseQueryException(Exception):

class WriteQueryNotAllowedException(GraphDatabaseQueryException):
    pass


class ClientStatementException(GraphDatabaseQueryException):
    pass

File diff suppressed because it is too large
@@ -1,7 +1,7 @@
from tasks.jobs.attack_paths.config import PROVIDER_ID_PROPERTY, PROVIDER_RESOURCE_LABEL
from tasks.jobs.attack_paths.config import DEPRECATED_PROVIDER_RESOURCE_LABEL

CARTOGRAPHY_SCHEMA_METADATA = f"""
MATCH (n:{PROVIDER_RESOURCE_LABEL} {{{PROVIDER_ID_PROPERTY}: $provider_id}})
MATCH (n:{DEPRECATED_PROVIDER_RESOURCE_LABEL} {{provider_id: $provider_id}})
WHERE n._module_name STARTS WITH 'cartography:'
  AND NOT n._module_name IN ['cartography:ontology', 'cartography:prowler']
  AND n._module_version IS NOT NULL
@@ -1,5 +1,4 @@
import logging
import re

from typing import Any, Iterable

@@ -13,12 +12,7 @@ from api.attack_paths.queries.schema import (
    RAW_SCHEMA_URL,
)
from config.custom_logging import BackendLogger
from tasks.jobs.attack_paths.config import (
    INTERNAL_LABELS,
    INTERNAL_PROPERTIES,
    PROVIDER_ID_PROPERTY,
    is_dynamic_isolation_label,
)
from tasks.jobs.attack_paths.config import INTERNAL_LABELS, INTERNAL_PROPERTIES

logger = logging.getLogger(BackendLogger.API)

@@ -123,38 +117,6 @@ def execute_query(

# Custom query helpers

# Patterns that indicate SSRF or dangerous procedure calls
# Defense-in-depth layer - the primary control is `neo4j.READ_ACCESS`
_BLOCKED_PATTERNS = [
    re.compile(r"\bLOAD\s+CSV\b", re.IGNORECASE),
    re.compile(r"\bapoc\.load\b", re.IGNORECASE),
    re.compile(r"\bapoc\.import\b", re.IGNORECASE),
    re.compile(r"\bapoc\.export\b", re.IGNORECASE),
    re.compile(r"\bapoc\.cypher\b", re.IGNORECASE),
    re.compile(r"\bapoc\.systemdb\b", re.IGNORECASE),
    re.compile(r"\bapoc\.config\b", re.IGNORECASE),
    re.compile(r"\bapoc\.periodic\b", re.IGNORECASE),
    re.compile(r"\bapoc\.do\b", re.IGNORECASE),
    re.compile(r"\bapoc\.trigger\b", re.IGNORECASE),
    re.compile(r"\bapoc\.custom\b", re.IGNORECASE),
]

# Strip string literals so patterns inside quotes don't cause false positives
# Handles escaped quotes (\' and \") inside strings
_STRING_LITERALS = re.compile(r"'(?:[^'\\]|\\.)*'|\"(?:[^\"\\]|\\.)*\"")


def validate_custom_query(cypher: str) -> None:
    """Reject queries containing known SSRF or dangerous procedure patterns.

    Raises ValidationError if a blocked pattern is found.
    String literals are stripped before matching to avoid false positives.
    """
    stripped = _STRING_LITERALS.sub("", cypher)
    for pattern in _BLOCKED_PATTERNS:
        if pattern.search(stripped):
            raise ValidationError({"query": "Query contains a blocked operation"})


def normalize_custom_query_payload(raw_data):
    if not isinstance(raw_data, dict):
@@ -173,8 +135,6 @@ def execute_custom_query(
    cypher: str,
    provider_id: str,
) -> dict[str, Any]:
    validate_custom_query(cypher)

    try:
        graph = graph_database.execute_read_query(
            database=database_name,
@@ -183,9 +143,6 @@ def execute_custom_query(
    serialized = _serialize_graph(graph, provider_id)
    return _truncate_graph(serialized)

    except graph_database.ClientStatementException as exc:
        raise ValidationError({"query": exc.message})

    except graph_database.WriteQueryNotAllowedException:
        raise PermissionDenied(
            "Attack Paths query execution failed: read-only queries are enforced"
@@ -258,7 +215,7 @@ def _serialize_graph(graph, provider_id: str) -> dict[str, Any]:
    nodes = []
    kept_node_ids = set()
    for node in graph.nodes:
        if node._properties.get(PROVIDER_ID_PROPERTY) != provider_id:
        if node._properties.get("provider_id") != provider_id:
            continue

        kept_node_ids.add(node.element_id)
@@ -270,15 +227,9 @@ def _serialize_graph(graph, provider_id: str) -> dict[str, Any]:
        },
    )

    filtered_count = len(graph.nodes) - len(nodes)
    if filtered_count > 0:
        logger.debug(
            f"Filtered {filtered_count} nodes without matching provider_id={provider_id}"
        )

    relationships = []
    for relationship in graph.relationships:
        if relationship._properties.get(PROVIDER_ID_PROPERTY) != provider_id:
        if relationship._properties.get("provider_id") != provider_id:
            continue

        if (
@@ -306,11 +257,7 @@ def _serialize_graph(graph, provider_id: str) -> dict[str, Any]:


def _filter_labels(labels: Iterable[str]) -> list[str]:
    return [
        label
        for label in labels
        if label not in INTERNAL_LABELS and not is_dynamic_isolation_label(label)
    ]
    return [label for label in labels if label not in INTERNAL_LABELS]


def _serialize_properties(properties: dict[str, Any]) -> dict[str, Any]:

@@ -18,7 +18,6 @@ from django.db import (
)
from django_celery_beat.models import PeriodicTask
from psycopg2 import connect as psycopg2_connect
from psycopg2 import sql as psycopg2_sql
from psycopg2.extensions import AsIs, new_type, register_adapter, register_type
from rest_framework_json_api.serializers import ValidationError

@@ -281,23 +280,15 @@ class PostgresEnumMigration:
        self.enum_values = enum_values

    def create_enum_type(self, apps, schema_editor):  # noqa: F841
        string_enum_values = ", ".join([f"'{value}'" for value in self.enum_values])
        with schema_editor.connection.cursor() as cursor:
            cursor.execute(
                psycopg2_sql.SQL("CREATE TYPE {} AS ENUM ({})").format(
                    psycopg2_sql.Identifier(self.enum_name),
                    psycopg2_sql.SQL(", ").join(
                        psycopg2_sql.Literal(v) for v in self.enum_values
                    ),
                )
                f"CREATE TYPE {self.enum_name} AS ENUM ({string_enum_values});"
            )

    def drop_enum_type(self, apps, schema_editor):  # noqa: F841
        with schema_editor.connection.cursor() as cursor:
            cursor.execute(
                psycopg2_sql.SQL("DROP TYPE {}").format(
                    psycopg2_sql.Identifier(self.enum_name)
                )
            )
            cursor.execute(f"DROP TYPE {self.enum_name};")


class PostgresEnumField(models.Field):

@@ -926,9 +926,6 @@ class FindingGroupSummaryFilter(FilterSet):
    check_id = CharFilter(field_name="check_id", lookup_expr="exact")
    check_id__in = CharInFilter(field_name="check_id", lookup_expr="in")
    check_id__icontains = CharFilter(field_name="check_id", lookup_expr="icontains")
    check_title__icontains = CharFilter(
        field_name="check_title", lookup_expr="icontains"
    )

    # Provider filters
    provider_id = UUIDFilter(field_name="provider_id", lookup_expr="exact")
@@ -1028,9 +1025,6 @@ class LatestFindingGroupSummaryFilter(FilterSet):
    check_id = CharFilter(field_name="check_id", lookup_expr="exact")
    check_id__in = CharInFilter(field_name="check_id", lookup_expr="in")
    check_id__icontains = CharFilter(field_name="check_id", lookup_expr="icontains")
    check_title__icontains = CharFilter(
        field_name="check_title", lookup_expr="icontains"
    )

    # Provider filters
    provider_id = UUIDFilter(field_name="provider_id", lookup_expr="exact")

@@ -1,31 +0,0 @@
# Generated by Django 5.1.15 on 2026-03-18

from django.contrib.postgres.indexes import GinIndex, OpClass
from django.contrib.postgres.operations import AddIndexConcurrently
from django.db import migrations
from django.db.models.functions import Upper


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0084_googleworkspace_provider"),
    ]

    operations = [
        AddIndexConcurrently(
            model_name="findinggroupdailysummary",
            index=GinIndex(
                OpClass(Upper("check_id"), name="gin_trgm_ops"),
                name="fgds_check_id_trgm_idx",
            ),
        ),
        AddIndexConcurrently(
            model_name="findinggroupdailysummary",
            index=GinIndex(
                OpClass(Upper("check_title"), name="gin_trgm_ops"),
                name="fgds_check_title_trgm_idx",
            ),
        ),
    ]
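
The two GIN trigram indexes in the migration above (and their twins in the model Meta below) exist to serve case-insensitive substring filters. A hedged sketch of the queryset they are meant to accelerate; on PostgreSQL, Django compiles icontains to UPPER(col) LIKE UPPER('%...%'), which a gin_trgm_ops index over Upper(col) can satisfy without a sequential scan:

from api.models import FindingGroupDailySummary  # import path as used in this diff

# Served by fgds_check_title_trgm_idx once the migration has run.
summaries = FindingGroupDailySummary.objects.filter(
    check_title__icontains="s3 bucket"
)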

@@ -1,6 +1,7 @@
import json
import logging
import re
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta, timezone
from uuid import UUID, uuid4

@@ -8,8 +9,6 @@ from allauth.socialaccount.models import SocialApp
from config.custom_logging import BackendLogger
from config.settings.social_login import SOCIALACCOUNT_PROVIDERS
from cryptography.fernet import Fernet, InvalidToken
import defusedxml
from defusedxml import ElementTree as ET
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.postgres.fields import ArrayField
@@ -1784,15 +1783,6 @@ class FindingGroupDailySummary(RowLevelSecurityProtectedModel):
                fields=["tenant_id", "provider", "inserted_at"],
                name="fgds_tenant_prov_ins_idx",
            ),
            # Trigram indexes for case-insensitive search
            GinIndex(
                OpClass(Upper("check_id"), name="gin_trgm_ops"),
                name="fgds_check_id_trgm_idx",
            ),
            GinIndex(
                OpClass(Upper("check_title"), name="gin_trgm_ops"),
                name="fgds_check_title_trgm_idx",
            ),
        ]

    class JSONAPIMeta:
@@ -2068,8 +2058,6 @@ class SAMLConfiguration(RowLevelSecurityProtectedModel):
            root = ET.fromstring(self.metadata_xml)
        except ET.ParseError as e:
            raise ValidationError({"metadata_xml": f"Invalid XML: {e}"})
        except defusedxml.DefusedXmlException as e:
            raise ValidationError({"metadata_xml": f"Unsafe XML content rejected: {e}"})

        # Entity ID
        entity_id = root.attrib.get("entityID")
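
A minimal, self-contained sketch of the defusedxml behavior the except clause above relies on: any document that declares entities raises before expansion can start (EntitiesForbidden is a DefusedXmlException subclass):

import defusedxml
from defusedxml import ElementTree as ET

bomb = "<?xml version='1.0'?><!DOCTYPE b [<!ENTITY a 'x'>]><r>&a;</r>"
try:
    ET.fromstring(bomb)
except defusedxml.DefusedXmlException:
    print("entity definitions rejected before any expansion")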

@@ -1,7 +1,7 @@
openapi: 3.0.3
info:
  title: Prowler API
  version: 1.23.0
  version: 1.21.0
  description: |-
    Prowler API specification.


@@ -301,7 +301,7 @@ class TestTokenSwitchTenant:
        assert invalid_tenant_response.status_code == 400
        assert invalid_tenant_response.json()["errors"][0]["code"] == "invalid"
        assert invalid_tenant_response.json()["errors"][0]["detail"] == (
            "Tenant does not exist or user is not a member."
            "Tenant does not exist or user is not a " "member."
        )


@@ -912,9 +912,10 @@ class TestAPIKeyLifecycle:
        auth_response = client.get(reverse("provider-list"), headers=api_key_headers)

        # Must return 401 Unauthorized, not 500 Internal Server Error
        assert (
            auth_response.status_code == 401
        ), f"Expected 401 but got {auth_response.status_code}: {auth_response.json()}"
        assert auth_response.status_code == 401, (
            f"Expected 401 but got {auth_response.status_code}: "
            f"{auth_response.json()}"
        )

        # Verify error message is present
        response_json = auth_response.json()

@@ -10,11 +10,11 @@ from django.conf import settings
import api
import api.apps as api_apps_module
from api.apps import (
    ApiConfig,
    PRIVATE_KEY_FILE,
    PUBLIC_KEY_FILE,
    SIGNING_KEY_ENV,
    VERIFYING_KEY_ENV,
    ApiConfig,
)


@@ -187,10 +187,9 @@ def test_ready_initializes_driver_for_api_process(monkeypatch):
    _set_argv(monkeypatch, ["gunicorn"])
    _set_testing(monkeypatch, False)

    with (
        patch.object(ApiConfig, "_ensure_crypto_keys", return_value=None),
        patch("api.attack_paths.database.init_driver") as init_driver,
    ):
    with patch.object(ApiConfig, "_ensure_crypto_keys", return_value=None), patch(
        "api.attack_paths.database.init_driver"
    ) as init_driver:
        config.ready()

    init_driver.assert_called_once()
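
The two `with` spellings swapped throughout these hunks are behaviorally identical; the parenthesized multi-context form only parses on Python 3.10 and newer. A minimal illustration with stand-in patch targets:

from unittest.mock import patch

with (
    patch("os.getcwd", return_value="/tmp"),
    patch("os.getpid", return_value=1234),
):
    pass  # both mocks active, exactly as with the single-line comma form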
@@ -201,10 +200,9 @@ def test_ready_skips_driver_for_celery(monkeypatch):
    _set_argv(monkeypatch, ["celery", "-A", "api"])
    _set_testing(monkeypatch, False)

    with (
        patch.object(ApiConfig, "_ensure_crypto_keys", return_value=None),
        patch("api.attack_paths.database.init_driver") as init_driver,
    ):
    with patch.object(ApiConfig, "_ensure_crypto_keys", return_value=None), patch(
        "api.attack_paths.database.init_driver"
    ) as init_driver:
        config.ready()

    init_driver.assert_not_called()
@@ -215,10 +213,9 @@ def test_ready_skips_driver_for_manage_py_skip_command(monkeypatch):
    _set_argv(monkeypatch, ["manage.py", "migrate"])
    _set_testing(monkeypatch, False)

    with (
        patch.object(ApiConfig, "_ensure_crypto_keys", return_value=None),
        patch("api.attack_paths.database.init_driver") as init_driver,
    ):
    with patch.object(ApiConfig, "_ensure_crypto_keys", return_value=None), patch(
        "api.attack_paths.database.init_driver"
    ) as init_driver:
        config.ready()

    init_driver.assert_not_called()
@@ -229,10 +226,9 @@ def test_ready_skips_driver_when_testing(monkeypatch):
    _set_argv(monkeypatch, ["gunicorn"])
    _set_testing(monkeypatch, True)

    with (
        patch.object(ApiConfig, "_ensure_crypto_keys", return_value=None),
        patch("api.attack_paths.database.init_driver") as init_driver,
    ):
    with patch.object(ApiConfig, "_ensure_crypto_keys", return_value=None), patch(
        "api.attack_paths.database.init_driver"
    ) as init_driver:
        config.ready()

    init_driver.assert_not_called()

@@ -9,10 +9,6 @@ from rest_framework.exceptions import APIException, PermissionDenied, Validation

from api.attack_paths import database as graph_database
from api.attack_paths import views_helpers
from tasks.jobs.attack_paths.config import (
    PROVIDER_ELEMENT_ID_PROPERTY,
    PROVIDER_ID_PROPERTY,
)


def _make_neo4j_error(message, code):
@@ -112,7 +108,7 @@ def test_execute_query_serializes_graph(
        labels=["AWSAccount"],
        properties={
            "name": "account",
            PROVIDER_ID_PROPERTY: provider_id,
            "provider_id": provider_id,
            "complex": {
                "items": [
                    attack_paths_graph_stub_classes.NativeValue("value"),
@@ -122,14 +118,14 @@ def test_execute_query_serializes_graph(
        },
    )
    node_2 = attack_paths_graph_stub_classes.Node(
        "node-2", ["RDSInstance"], {PROVIDER_ID_PROPERTY: provider_id}
        "node-2", ["RDSInstance"], {"provider_id": provider_id}
    )
    relationship = attack_paths_graph_stub_classes.Relationship(
        element_id="rel-1",
        rel_type="OWNS",
        start_node=node,
        end_node=node_2,
        properties={"weight": 1, PROVIDER_ID_PROPERTY: provider_id},
        properties={"weight": 1, "provider_id": provider_id},
    )
    graph = SimpleNamespace(nodes=[node, node_2], relationships=[relationship])

@@ -217,20 +213,20 @@ def test_serialize_graph_filters_by_provider_id(attack_paths_graph_stub_classes)
    provider_id = "provider-keep"

    node_keep = attack_paths_graph_stub_classes.Node(
        "n1", ["AWSAccount"], {PROVIDER_ID_PROPERTY: provider_id}
        "n1", ["AWSAccount"], {"provider_id": provider_id}
    )
    node_drop = attack_paths_graph_stub_classes.Node(
        "n2", ["AWSAccount"], {PROVIDER_ID_PROPERTY: "provider-other"}
        "n2", ["AWSAccount"], {"provider_id": "provider-other"}
    )

    rel_keep = attack_paths_graph_stub_classes.Relationship(
        "r1", "OWNS", node_keep, node_keep, {PROVIDER_ID_PROPERTY: provider_id}
        "r1", "OWNS", node_keep, node_keep, {"provider_id": provider_id}
    )
    rel_drop_by_provider = attack_paths_graph_stub_classes.Relationship(
        "r2", "OWNS", node_keep, node_drop, {PROVIDER_ID_PROPERTY: "provider-other"}
        "r2", "OWNS", node_keep, node_drop, {"provider_id": "provider-other"}
    )
    rel_drop_orphaned = attack_paths_graph_stub_classes.Relationship(
        "r3", "OWNS", node_keep, node_drop, {PROVIDER_ID_PROPERTY: provider_id}
        "r3", "OWNS", node_keep, node_drop, {"provider_id": provider_id}
    )

    graph = SimpleNamespace(
@@ -354,8 +350,10 @@ def test_serialize_properties_filters_internal_fields():
        "_module_name": "cartography:aws",
        "_module_version": "0.98.0",
        # Provider isolation
        PROVIDER_ID_PROPERTY: "42",
        PROVIDER_ELEMENT_ID_PROPERTY: "42:abc123",
        "_provider_id": "42",
        "_provider_element_id": "42:abc123",
        "provider_id": "42",
        "provider_element_id": "42:abc123",
    }

    result = views_helpers._serialize_properties(properties)
@@ -363,14 +361,6 @@ def test_serialize_properties_filters_internal_fields():
    assert result == {"name": "prod"}


def test_filter_labels_strips_dynamic_isolation_labels():
    labels = ["AWSRole", "_Tenant_abc123", "_Provider_def456", "_ProviderResource"]

    result = views_helpers._filter_labels(labels)

    assert result == ["AWSRole"]


def test_serialize_graph_as_text_node_without_properties():
    graph = {
        "nodes": [{"id": "n1", "labels": ["AWSAccount"], "properties": {}}],
@@ -450,13 +440,13 @@ def test_execute_custom_query_serializes_graph(
):
    provider_id = "test-provider-123"
    node_1 = attack_paths_graph_stub_classes.Node(
        "node-1", ["AWSAccount"], {PROVIDER_ID_PROPERTY: provider_id}
        "node-1", ["AWSAccount"], {"provider_id": provider_id}
    )
    node_2 = attack_paths_graph_stub_classes.Node(
        "node-2", ["RDSInstance"], {PROVIDER_ID_PROPERTY: provider_id}
        "node-2", ["RDSInstance"], {"provider_id": provider_id}
    )
    relationship = attack_paths_graph_stub_classes.Relationship(
        "rel-1", "OWNS", node_1, node_2, {PROVIDER_ID_PROPERTY: provider_id}
        "rel-1", "OWNS", node_1, node_2, {"provider_id": provider_id}
    )

    graph_result = MagicMock()
@@ -511,72 +501,6 @@ def test_execute_custom_query_wraps_graph_errors():
    mock_logger.error.assert_called_once()


# -- validate_custom_query ------------------------------------------------


@pytest.mark.parametrize(
    "cypher",
    [
        "LOAD CSV FROM 'http://169.254.169.254/' AS x RETURN x",
        "load csv from 'http://evil.com' as row return row",
        "CALL apoc.load.json('http://evil.com/') YIELD value RETURN value",
        "CALL apoc.load.csvParams('http://evil.com/', {}, null) YIELD list RETURN list",
        "CALL apoc.import.csv([{fileName: 'f'}], [], {}) YIELD node RETURN node",
        "CALL apoc.export.csv.all('file.csv', {})",
        "CALL apoc.cypher.run('CREATE (n)', {}) YIELD value RETURN value",
        "CALL apoc.systemdb.graph() YIELD nodes RETURN nodes",
        "CALL apoc.config.list() YIELD key, value RETURN key, value",
        "CALL apoc.periodic.iterate('MATCH (n) RETURN n', 'DELETE n', {batchSize: 100})",
        "CALL apoc.do.when(true, 'CREATE (n) RETURN n', '', {}) YIELD value RETURN value",
        "CALL apoc.trigger.add('t', 'RETURN 1', {phase: 'before'})",
        "CALL apoc.custom.asProcedure('myProc', 'RETURN 1')",
    ],
    ids=[
        "LOAD_CSV",
        "LOAD_CSV_lowercase",
        "apoc.load.json",
        "apoc.load.csvParams",
        "apoc.import.csv",
        "apoc.export.csv",
        "apoc.cypher.run",
        "apoc.systemdb.graph",
        "apoc.config.list",
        "apoc.periodic.iterate",
        "apoc.do.when",
        "apoc.trigger.add",
        "apoc.custom.asProcedure",
    ],
)
def test_validate_custom_query_rejects_blocked_patterns(cypher):
    with pytest.raises(ValidationError) as exc:
        views_helpers.validate_custom_query(cypher)

    assert "blocked operation" in str(exc.value.detail)


@pytest.mark.parametrize(
    "cypher",
    [
        "MATCH (n:AWSAccount) RETURN n LIMIT 10",
        "MATCH (a)-[r]->(b) RETURN a, r, b",
        "MATCH (n) WHERE n.name CONTAINS 'load' RETURN n",
        "CALL apoc.create.vNode(['Label'], {}) YIELD node RETURN node",
        "MATCH (n) WHERE n.name = 'apoc.load.json' RETURN n",
        'MATCH (n) WHERE n.description = "LOAD CSV is cool" RETURN n',
    ],
    ids=[
        "simple_match",
        "traversal",
        "contains_load_substring",
        "apoc_virtual_node",
        "apoc_load_inside_single_quotes",
        "load_csv_inside_double_quotes",
    ],
)
def test_validate_custom_query_allows_clean_queries(cypher):
    views_helpers.validate_custom_query(cypher)


# -- _truncate_graph ----------------------------------------------------------

@@ -442,78 +442,3 @@ class TestThreadSafety:
        # All threads got the same driver instance
        assert all(r is mock_driver for r in results)
        assert len(results) == 10


class TestHasProviderData:
    """Test has_provider_data helper for checking provider nodes in Neo4j."""

    def test_returns_true_when_nodes_exist(self):
        import api.attack_paths.database as db_module

        mock_session = MagicMock()
        mock_result = MagicMock()
        mock_result.single.return_value = MagicMock()  # non-None record
        mock_session.run.return_value = mock_result

        session_ctx = MagicMock()
        session_ctx.__enter__.return_value = mock_session
        session_ctx.__exit__.return_value = False

        with patch(
            "api.attack_paths.database.get_session",
            return_value=session_ctx,
        ):
            assert db_module.has_provider_data("db-tenant-abc", "provider-123") is True

        mock_session.run.assert_called_once()

    def test_returns_false_when_no_nodes(self):
        import api.attack_paths.database as db_module

        mock_session = MagicMock()
        mock_result = MagicMock()
        mock_result.single.return_value = None
        mock_session.run.return_value = mock_result

        session_ctx = MagicMock()
        session_ctx.__enter__.return_value = mock_session
        session_ctx.__exit__.return_value = False

        with patch(
            "api.attack_paths.database.get_session",
            return_value=session_ctx,
        ):
            assert db_module.has_provider_data("db-tenant-abc", "provider-123") is False

    def test_returns_false_when_database_not_found(self):
        import api.attack_paths.database as db_module

        session_ctx = MagicMock()
        session_ctx.__enter__.side_effect = db_module.GraphDatabaseQueryException(
            message="Database does not exist",
            code="Neo.ClientError.Database.DatabaseNotFound",
        )

        with patch(
            "api.attack_paths.database.get_session",
            return_value=session_ctx,
        ):
            assert (
                db_module.has_provider_data("db-tenant-gone", "provider-123") is False
            )

    def test_raises_on_other_errors(self):
        import api.attack_paths.database as db_module

        session_ctx = MagicMock()
        session_ctx.__enter__.side_effect = db_module.GraphDatabaseQueryException(
            message="Connection refused",
            code="Neo.TransientError.General.UnknownError",
        )

        with patch(
            "api.attack_paths.database.get_session",
            return_value=session_ctx,
        ):
            with pytest.raises(db_module.GraphDatabaseQueryException):
                db_module.has_provider_data("db-tenant-abc", "provider-123")

@@ -6,12 +6,10 @@ import pytest
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS, OperationalError
from freezegun import freeze_time
from psycopg2 import sql as psycopg2_sql
from rest_framework_json_api.serializers import ValidationError

from api.db_utils import (
    POSTGRES_TENANT_VAR,
    PostgresEnumMigration,
    _should_create_index_on_partition,
    batch_delete,
    create_objects_in_batches,
@@ -912,61 +910,3 @@ class TestRlsTransaction:
            cursor.execute("SELECT 1")
            result = cursor.fetchone()
            assert result[0] == 1


class TestPostgresEnumMigration:
    """
    Verify that PostgresEnumMigration builds DDL statements via psycopg2.sql
    so that enum type names and values are always properly quoted — preventing
    SQL injection through f-string interpolation.
    """

    def _make_mock_schema_editor(self):
        mock_cursor = MagicMock()
        mock_conn = MagicMock()
        mock_conn.cursor.return_value.__enter__ = MagicMock(return_value=mock_cursor)
        mock_conn.cursor.return_value.__exit__ = MagicMock(return_value=False)
        mock_schema_editor = MagicMock()
        mock_schema_editor.connection = mock_conn
        return mock_schema_editor, mock_cursor

    def test_create_enum_type_generates_correct_sql(self):
        """create_enum_type builds a proper CREATE TYPE … AS ENUM via psycopg2.sql."""
        migration = PostgresEnumMigration("my_enum", ("val_a", "val_b"))
        schema_editor, mock_cursor = self._make_mock_schema_editor()

        migration.create_enum_type(apps=None, schema_editor=schema_editor)

        mock_cursor.execute.assert_called_once()
        query_arg = mock_cursor.execute.call_args[0][0]
        assert isinstance(
            query_arg, psycopg2_sql.Composable
        ), "create_enum_type must pass a psycopg2.sql.Composable, not a raw string."
        # Verify the composed SQL structure: CREATE TYPE <Identifier> AS ENUM (<Literals>)
        parts = query_arg.seq
        assert parts[0] == psycopg2_sql.SQL("CREATE TYPE ")
        assert isinstance(parts[1], psycopg2_sql.Identifier)
        assert parts[1].strings == ("my_enum",)
        assert parts[2] == psycopg2_sql.SQL(" AS ENUM (")
        # The enum values are a Composed of Literal items joined by ", "
        enum_literals = [p for p in parts[3].seq if isinstance(p, psycopg2_sql.Literal)]
        assert [lit._wrapped for lit in enum_literals] == ["val_a", "val_b"]
        assert parts[4] == psycopg2_sql.SQL(")")

    def test_drop_enum_type_generates_correct_sql(self):
        """drop_enum_type builds a proper DROP TYPE via psycopg2.sql."""
        migration = PostgresEnumMigration("my_enum", ("val_a",))
        schema_editor, mock_cursor = self._make_mock_schema_editor()

        migration.drop_enum_type(apps=None, schema_editor=schema_editor)

        mock_cursor.execute.assert_called_once()
        query_arg = mock_cursor.execute.call_args[0][0]
        assert isinstance(
            query_arg, psycopg2_sql.Composable
        ), "drop_enum_type must pass a psycopg2.sql.Composable, not a raw string."
        # Verify the composed SQL structure: DROP TYPE <Identifier>
        parts = query_arg.seq
        assert parts[0] == psycopg2_sql.SQL("DROP TYPE ")
        assert isinstance(parts[1], psycopg2_sql.Identifier)
        assert parts[1].strings == ("my_enum",)

@@ -243,39 +243,6 @@ class TestSAMLConfigurationModel:
        assert "Invalid XML" in errors["metadata_xml"][0]
        assert "not well-formed" in errors["metadata_xml"][0]

    def test_xml_bomb_rejected(self, tenants_fixture):
        """
        Regression test: a 'billion laughs' XML bomb in the SAML metadata field
        must be rejected and not allowed to exhaust server memory / CPU.

        Before the fix, xml.etree.ElementTree was used directly, which does not
        protect against entity-expansion attacks. The fix switches to defusedxml
        which raises an exception for any XML containing entity definitions.
        """
        tenant = tenants_fixture[0]
        xml_bomb = (
            "<?xml version='1.0'?>"
            "<!DOCTYPE bomb ["
            " <!ENTITY a 'aaaaaaaaaa'>"
            " <!ENTITY b '&a;&a;&a;&a;&a;&a;&a;&a;&a;&a;'>"
            " <!ENTITY c '&b;&b;&b;&b;&b;&b;&b;&b;&b;&b;'>"
            " <!ENTITY d '&c;&c;&c;&c;&c;&c;&c;&c;&c;&c;'>"
            "]>"
            "<md:EntityDescriptor entityID='&d;' "
            "xmlns:md='urn:oasis:names:tc:SAML:2.0:metadata'/>"
        )
        config = SAMLConfiguration(
            email_domain="xmlbomb.com",
            metadata_xml=xml_bomb,
            tenant=tenant,
        )

        with pytest.raises(ValidationError) as exc_info:
            config._parse_metadata()

        errors = exc_info.value.message_dict
        assert "metadata_xml" in errors

    def test_metadata_missing_sso_fails(self, tenants_fixture):
        tenant = tenants_fixture[0]
        xml = """<md:EntityDescriptor entityID="x" xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata">

@@ -45,7 +45,6 @@ from api.models import (
    ComplianceRequirementOverview,
    DailySeveritySummary,
    Finding,
    FindingGroupDailySummary,
    Integration,
    Invitation,
    LighthouseProviderConfiguration,
@@ -3811,12 +3810,6 @@ class TestTaskViewSet:

@pytest.mark.django_db
class TestAttackPathsScanViewSet:
    @pytest.fixture(autouse=True)
    def _clear_throttle_cache(self):
        from django.core.cache import cache

        cache.clear()

    @staticmethod
    def _run_payload(query_id="aws-rds", parameters=None):
        return {
@@ -4418,6 +4411,8 @@ class TestAttackPathsScanViewSet:
            }
        }

    # TODO: Remove skip once queries/custom and schema endpoints are unblocked
    @pytest.mark.skip(reason="Endpoint temporarily blocked")
    def test_run_custom_query_returns_graph(
        self,
        authenticated_client,
@@ -4475,6 +4470,7 @@ class TestAttackPathsScanViewSet:
        assert attributes["total_nodes"] == 1
        assert attributes["truncated"] is False

    @pytest.mark.skip(reason="Endpoint temporarily blocked")
    def test_run_custom_query_returns_text_when_accept_text_plain(
        self,
        authenticated_client,
@@ -4529,6 +4525,7 @@ class TestAttackPathsScanViewSet:
        assert "## Relationships (0)" in body
        assert "## Summary" in body

    @pytest.mark.skip(reason="Endpoint temporarily blocked")
    def test_run_custom_query_returns_404_when_no_nodes(
        self,
        authenticated_client,
@@ -4570,6 +4567,7 @@ class TestAttackPathsScanViewSet:

        assert response.status_code == status.HTTP_404_NOT_FOUND

    @pytest.mark.skip(reason="Endpoint temporarily blocked")
    def test_run_custom_query_returns_400_when_graph_not_ready(
        self,
        authenticated_client,
@@ -4596,6 +4594,7 @@ class TestAttackPathsScanViewSet:
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert "not available" in response.json()["errors"][0]["detail"]

    @pytest.mark.skip(reason="Endpoint temporarily blocked")
    def test_run_custom_query_returns_403_for_write_query(
        self,
        authenticated_client,
@@ -4633,343 +4632,9 @@ class TestAttackPathsScanViewSet:

        assert response.status_code == status.HTTP_403_FORBIDDEN

    # -- SSRF blocklist (HTTP level) ----------------------------------------------

    @pytest.mark.parametrize(
        "cypher",
        [
            "LOAD CSV FROM 'http://169.254.169.254/' AS x RETURN x",
            "CALL apoc.load.json('http://evil.com/') YIELD value RETURN value",
            "CALL apoc.import.csv([{fileName: 'f'}], [], {}) YIELD node RETURN node",
            "CALL apoc.export.csv.all('file.csv', {})",
            "CALL apoc.cypher.run('CREATE (n)', {}) YIELD value RETURN value",
            "CALL apoc.systemdb.graph() YIELD nodes RETURN nodes",
        ],
        ids=[
            "LOAD_CSV",
            "apoc.load",
            "apoc.import",
            "apoc.export",
            "apoc.cypher.run",
            "apoc.systemdb",
        ],
    )
    def test_run_custom_query_rejects_ssrf_patterns(
        self,
        authenticated_client,
        providers_fixture,
        scans_fixture,
        create_attack_paths_scan,
        cypher,
    ):
        provider = providers_fixture[0]
        attack_paths_scan = create_attack_paths_scan(
            provider,
            scan=scans_fixture[0],
            graph_data_ready=True,
        )

        with patch(
            "api.v1.views.graph_database.get_database_name",
            return_value="db-test",
        ):
            response = authenticated_client.post(
                reverse(
                    "attack-paths-scans-queries-custom",
                    kwargs={"pk": attack_paths_scan.id},
                ),
                data=self._custom_query_payload(cypher),
                content_type=API_JSON_CONTENT_TYPE,
            )

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert "blocked" in response.json()["errors"][0]["detail"].lower()

    # -- Cross-tenant isolation ---------------------------------------------------

    def test_run_custom_query_returns_404_for_foreign_tenant(
        self,
        authenticated_client,
        create_attack_paths_scan,
    ):
        from api.models import Provider, Tenant

        foreign_tenant = Tenant.objects.create(name="foreign-tenant")
        foreign_provider = Provider.objects.create(
            tenant=foreign_tenant,
            provider="aws",
            uid="123456789999",
        )
        attack_paths_scan = create_attack_paths_scan(
            foreign_provider,
            graph_data_ready=True,
        )

        with patch(
            "api.v1.views.graph_database.get_database_name",
            return_value="db-test",
        ):
            response = authenticated_client.post(
                reverse(
                    "attack-paths-scans-queries-custom",
                    kwargs={"pk": attack_paths_scan.id},
                ),
                data=self._custom_query_payload(),
                content_type=API_JSON_CONTENT_TYPE,
            )

        assert response.status_code == status.HTTP_404_NOT_FOUND

    def test_cartography_schema_returns_404_for_foreign_tenant(
        self,
        authenticated_client,
        create_attack_paths_scan,
    ):
        from api.models import Provider, Tenant

        foreign_tenant = Tenant.objects.create(name="foreign-tenant-schema")
        foreign_provider = Provider.objects.create(
            tenant=foreign_tenant,
            provider="aws",
            uid="123456789998",
        )
        attack_paths_scan = create_attack_paths_scan(
            foreign_provider,
            graph_data_ready=True,
        )

        response = authenticated_client.get(
            reverse(
                "attack-paths-scans-schema",
                kwargs={"pk": attack_paths_scan.id},
            )
        )

        assert response.status_code == status.HTTP_404_NOT_FOUND

    # -- Authentication / authorization -------------------------------------------

    def test_run_custom_query_returns_401_unauthenticated(
        self,
        providers_fixture,
        scans_fixture,
        create_attack_paths_scan,
    ):
        from rest_framework.test import APIClient

        provider = providers_fixture[0]
        attack_paths_scan = create_attack_paths_scan(
            provider,
            scan=scans_fixture[0],
            graph_data_ready=True,
        )

        unauthenticated = APIClient()
        response = unauthenticated.post(
            reverse(
                "attack-paths-scans-queries-custom",
                kwargs={"pk": attack_paths_scan.id},
            ),
            data=self._custom_query_payload(),
            content_type=API_JSON_CONTENT_TYPE,
        )

        assert response.status_code == status.HTTP_401_UNAUTHORIZED

    def test_cartography_schema_returns_401_unauthenticated(
        self,
        providers_fixture,
        scans_fixture,
        create_attack_paths_scan,
    ):
        from rest_framework.test import APIClient

        provider = providers_fixture[0]
        attack_paths_scan = create_attack_paths_scan(
            provider,
            scan=scans_fixture[0],
            graph_data_ready=True,
        )

        unauthenticated = APIClient()
        response = unauthenticated.get(
            reverse(
                "attack-paths-scans-schema",
                kwargs={"pk": attack_paths_scan.id},
            )
        )

        assert response.status_code == status.HTTP_401_UNAUTHORIZED

    def test_run_custom_query_returns_403_no_manage_scans(
        self,
        authenticated_client_no_permissions_rbac,
        providers_fixture,
        scans_fixture,
        create_attack_paths_scan,
    ):
        provider = providers_fixture[0]
        attack_paths_scan = create_attack_paths_scan(
            provider,
            scan=scans_fixture[0],
            graph_data_ready=True,
        )

        response = authenticated_client_no_permissions_rbac.post(
            reverse(
                "attack-paths-scans-queries-custom",
                kwargs={"pk": attack_paths_scan.id},
            ),
            data=self._custom_query_payload(),
            content_type=API_JSON_CONTENT_TYPE,
        )

        assert response.status_code == status.HTTP_403_FORBIDDEN

    # -- Error leakage ------------------------------------------------------------

    def test_run_custom_query_does_not_leak_internals_on_error(
        self,
        authenticated_client,
        providers_fixture,
        scans_fixture,
        create_attack_paths_scan,
    ):
        from rest_framework.exceptions import APIException

        provider = providers_fixture[0]
        attack_paths_scan = create_attack_paths_scan(
            provider,
            scan=scans_fixture[0],
            graph_data_ready=True,
        )

        with (
            patch(
                "api.v1.views.attack_paths_views_helpers.execute_custom_query",
                side_effect=APIException(
                    "Attack Paths query execution failed due to a database error"
                ),
            ),
            patch(
                "api.v1.views.graph_database.get_database_name",
                return_value="db-test",
            ),
        ):
            response = authenticated_client.post(
                reverse(
                    "attack-paths-scans-queries-custom",
                    kwargs={"pk": attack_paths_scan.id},
                ),
                data=self._custom_query_payload(),
                content_type=API_JSON_CONTENT_TYPE,
            )

        assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
        body = json.dumps(response.json()).lower()
        for forbidden_term in ["neo4j", "bolt://", "syntaxerror", "db-tenant-"]:
            assert forbidden_term not in body

    # -- Rate limiting (throttle) -------------------------------------------------

    def test_run_custom_query_throttled_after_limit(
        self,
        authenticated_client,
        providers_fixture,
        scans_fixture,
        create_attack_paths_scan,
    ):
        provider = providers_fixture[0]
        attack_paths_scan = create_attack_paths_scan(
            provider,
            scan=scans_fixture[0],
            graph_data_ready=True,
        )

        mock_graph = {
            "nodes": [{"id": "n1", "labels": ["Test"], "properties": {}}],
            "relationships": [],
            "total_nodes": 1,
            "truncated": False,
        }

        url = reverse(
            "attack-paths-scans-queries-custom",
            kwargs={"pk": attack_paths_scan.id},
        )
        payload = self._custom_query_payload()

        with (
            patch(
                "api.v1.views.attack_paths_views_helpers.execute_custom_query",
                return_value=mock_graph,
            ),
            patch(
                "api.v1.views.graph_database.get_database_name",
                return_value="db-test",
            ),
            patch(
                "api.v1.views.graph_database.clear_cache",
            ),
        ):
            for i in range(11):
                response = authenticated_client.post(
                    url,
                    data=payload,
                    content_type=API_JSON_CONTENT_TYPE,
                )
                if i < 10:
                    assert (
                        response.status_code == status.HTTP_200_OK
                    ), f"Request {i + 1} should succeed with 200 OK, got {response.status_code}"
                else:
                    assert (
                        response.status_code == status.HTTP_429_TOO_MANY_REQUESTS
                    ), f"Request {i + 1} should be throttled"
|
||||
|
||||
# -- Timeout simulation -------------------------------------------------------
|
||||
|
||||
def test_run_custom_query_returns_500_on_database_timeout(
|
||||
self,
|
||||
authenticated_client,
|
||||
providers_fixture,
|
||||
scans_fixture,
|
||||
create_attack_paths_scan,
|
||||
):
|
||||
from rest_framework.exceptions import APIException
|
||||
|
||||
provider = providers_fixture[0]
|
||||
attack_paths_scan = create_attack_paths_scan(
|
||||
provider,
|
||||
scan=scans_fixture[0],
|
||||
graph_data_ready=True,
|
||||
)
|
||||
|
||||
with (
|
||||
patch(
|
||||
"api.v1.views.attack_paths_views_helpers.execute_custom_query",
|
||||
side_effect=APIException(
|
||||
"Attack Paths query execution failed due to a database error"
|
||||
),
|
||||
),
|
||||
patch(
|
||||
"api.v1.views.graph_database.get_database_name",
|
||||
return_value="db-test",
|
||||
),
|
||||
):
|
||||
response = authenticated_client.post(
|
||||
reverse(
|
||||
"attack-paths-scans-queries-custom",
|
||||
kwargs={"pk": attack_paths_scan.id},
|
||||
),
|
||||
data=self._custom_query_payload(),
|
||||
content_type=API_JSON_CONTENT_TYPE,
|
||||
)
|
||||
|
||||
assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
|
||||
|
||||
# -- cartography_schema action ------------------------------------------------
|
||||
|
||||
@pytest.mark.skip(reason="Endpoint temporarily blocked")
|
||||
def test_cartography_schema_returns_urls(
|
||||
self,
|
||||
authenticated_client,
|
||||
@@ -5019,6 +4684,7 @@ class TestAttackPathsScanViewSet:
        assert "schema.md" in attributes["schema_url"]
        assert "raw.githubusercontent.com" in attributes["raw_schema_url"]

    @pytest.mark.skip(reason="Endpoint temporarily blocked")
    def test_cartography_schema_returns_404_when_no_metadata(
        self,
        authenticated_client,
@@ -5053,6 +4719,7 @@ class TestAttackPathsScanViewSet:
        assert response.status_code == status.HTTP_404_NOT_FOUND
        assert "No cartography schema metadata" in str(response.json())

    @pytest.mark.skip(reason="Endpoint temporarily blocked")
    def test_cartography_schema_returns_400_when_graph_not_ready(
        self,
        authenticated_client,
@@ -7859,12 +7526,8 @@ class TestUserRoleRelationshipViewSet:
        assert response.status_code == status.HTTP_204_NO_CONTENT
        relationships = UserRoleRelationship.objects.filter(user=create_test_user.id)
        assert relationships.count() == 4
        # Use set membership instead of positional slicing — QuerySet ordering is
        # non-deterministic without an explicit order_by, which makes slice-based
        # checks intermittently fail.
        added_role_ids = {r.id for r in roles_fixture[:2]}
        relationship_role_ids = {rel.role.id for rel in relationships}
        assert added_role_ids.issubset(relationship_role_ids)
        for relationship in relationships[2:]:  # Skip admin role
            assert relationship.role.id in [r.id for r in roles_fixture[:2]]

    def test_create_relationship_already_exists(
        self, authenticated_client, roles_fixture, create_test_user
@@ -14690,16 +14353,10 @@ class TestMuteRuleViewSet:
        assert len(data) == 2
        assert data[0]["id"] == str(mute_rules_fixture[first_index].id)

    @patch("api.v1.views.chain")
    @patch("api.v1.views.aggregate_finding_group_summaries_task.si")
    @patch("api.v1.views.mute_historical_findings_task.si")
    @patch("api.v1.views.transaction.on_commit", side_effect=lambda fn: fn())
    @patch("tasks.tasks.mute_historical_findings_task.apply_async")
    def test_mute_rules_create_valid(
        self,
        _mock_on_commit,
        mock_mute_signature,
        mock_aggregate_signature,
        mock_chain,
        mock_task,
        authenticated_client,
        findings_fixture,
        create_test_user,
@@ -14737,14 +14394,8 @@ class TestMuteRuleViewSet:
        assert finding.muted_at is not None
        assert finding.muted_reason == "Security exception approved"

        # Verify background task chain was called
        mock_mute_signature.assert_called_once()
        mock_aggregate_signature.assert_called_once()
        mock_chain.assert_called_once_with(
            mock_mute_signature.return_value,
            mock_aggregate_signature.return_value,
        )
        mock_chain.return_value.apply_async.assert_called_once()
        # Verify background task was called
        mock_task.assert_called_once()

    @patch("tasks.tasks.mute_historical_findings_task.apply_async")
    def test_mute_rules_create_converts_finding_ids_to_uids(
@@ -15539,22 +15190,6 @@ class TestFindingGroupViewSet:
        assert len(response.json()["data"]) == 1
        assert "bucket" in response.json()["data"][0]["id"].lower()

    def test_finding_groups_check_title_icontains(
        self, authenticated_client, finding_groups_fixture
    ):
        """Test searching check titles with icontains."""
        response = authenticated_client.get(
            reverse("finding-group-list"),
            {
                "filter[inserted_at]": TODAY,
                "filter[check_title.icontains]": "public access",
            },
        )
        assert response.status_code == status.HTTP_200_OK
        data = response.json()["data"]
        assert len(data) == 1
        assert data[0]["id"] == "s3_bucket_public_access"

    def test_resources_not_found(self, authenticated_client):
        """Test 404 returned for nonexistent check_id."""
        response = authenticated_client.get(
@@ -15853,48 +15488,6 @@ class TestFindingGroupViewSet:
        assert len(data) == 1
        assert data[0]["id"] == "cloudtrail_enabled"

    def test_finding_groups_latest_aggregates_latest_per_provider(
        self, authenticated_client, providers_fixture
    ):
        """Test /latest aggregates latest summary from each provider for the same check."""
        provider1 = providers_fixture[0]
        provider2 = providers_fixture[1]

        check_id = "cross_provider_latest_resources_total"
        now = datetime.now(timezone.utc).replace(minute=0, second=0, microsecond=0)

        FindingGroupDailySummary.objects.create(
            tenant_id=provider1.tenant_id,
            provider=provider1,
            check_id=check_id,
            inserted_at=now - timedelta(days=1),
            resources_total=20,
            resources_fail=20,
            fail_count=20,
        )
        FindingGroupDailySummary.objects.create(
            tenant_id=provider2.tenant_id,
            provider=provider2,
            check_id=check_id,
            inserted_at=now,
            resources_total=7,
            resources_fail=7,
            fail_count=7,
        )

        response = authenticated_client.get(
            reverse("finding-group-latest"),
            {"filter[check_id]": check_id},
        )

        assert response.status_code == status.HTTP_200_OK
        data = response.json()["data"]
        assert len(data) == 1
        attrs = data[0]["attributes"]
        assert attrs["resources_total"] == 27
        assert attrs["resources_fail"] == 27
        assert attrs["fail_count"] == 27

    def test_finding_groups_latest_provider_type_filter(
        self, authenticated_client, finding_groups_fixture
    ):

@@ -1241,7 +1241,7 @@ class AttackPathsQueryRunRequestSerializer(BaseSerializerV1):


class AttackPathsCustomQueryRunRequestSerializer(BaseSerializerV1):
    query = serializers.CharField(max_length=10000, min_length=1, trim_whitespace=True)
    query = serializers.CharField()

    class JSONAPIMeta:
        resource_name = "attack-paths-custom-query-run-requests"

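For reference, a minimal standalone sketch of the validation behavior the constrained `CharField` variant above implies (hypothetical serializer name; assumes only that Django REST Framework is installed):

```python
# Hypothetical standalone serializer mirroring the constrained field above.
from rest_framework import serializers


class CustomQuerySketchSerializer(serializers.Serializer):
    # trim_whitespace strips padding before min_length/max_length run,
    # so an all-whitespace query trims to "" and is rejected as blank.
    query = serializers.CharField(
        max_length=10000, min_length=1, trim_whitespace=True
    )


s = CustomQuerySketchSerializer(data={"query": "   "})
assert not s.is_valid()
assert "query" in s.errors
```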
@@ -51,13 +51,6 @@ from api.v1.views import (
)


# This helper view is used to block any endpoints that should not be available
# To use it, add a new entry in the `urlpatterns` list, for example (old but real one):
# path(
#     "attack-paths-scans/<uuid:pk>/queries/custom",
#     _blocked_endpoint,
#     name="attack-paths-scans-queries-custom-blocked",
# ),
@csrf_exempt
def _blocked_endpoint(request, *args, **kwargs):
    return JsonResponse(
@@ -216,6 +209,17 @@ urlpatterns = [
    path("tokens/saml", SAMLTokenValidateView.as_view(), name="token-saml"),
    path("tokens/google", GoogleSocialLoginView.as_view(), name="token-google"),
    path("tokens/github", GithubSocialLoginView.as_view(), name="token-github"),
    # TODO: Remove these blocked endpoints once they are properly tested
    path(
        "attack-paths-scans/<uuid:pk>/queries/custom",
        _blocked_endpoint,
        name="attack-paths-scans-queries-custom-blocked",
    ),
    path(
        "attack-paths-scans/<uuid:pk>/schema",
        _blocked_endpoint,
        name="attack-paths-scans-schema-blocked",
    ),
    path("", include(router.urls)),
    path("", include(tenants_router.urls)),
    path("", include(users_router.urls)),

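The body of `_blocked_endpoint` is truncated above after `return JsonResponse(`. A plausible completion, with the payload and status code as assumptions rather than the project's actual values:

```python
# Hedged sketch only: the real response body and status are not shown in this diff.
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt


@csrf_exempt
def _blocked_endpoint(request, *args, **kwargs):
    # Answer every HTTP method with a JSON:API-style error instead of
    # dispatching to the real view.
    return JsonResponse(
        {"errors": [{"status": "403", "detail": "This endpoint is temporarily unavailable."}]},
        status=403,
    )
```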
@@ -4,6 +4,7 @@ import json
import logging
import os
import time

from collections import defaultdict
from copy import deepcopy
from datetime import datetime, timedelta, timezone
@@ -11,12 +12,12 @@ from decimal import ROUND_HALF_UP, Decimal, InvalidOperation
from urllib.parse import urljoin

import sentry_sdk

from allauth.socialaccount.models import SocialAccount, SocialApp
from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from allauth.socialaccount.providers.saml.views import FinishACSView, LoginView
from botocore.exceptions import ClientError, NoCredentialsError, ParamValidationError
from celery import chain
from celery.result import AsyncResult
from config.custom_logging import BackendLogger
from config.env import env
@@ -75,14 +76,12 @@ from rest_framework.exceptions import (
)
from rest_framework.generics import GenericAPIView, get_object_or_404
from rest_framework.permissions import SAFE_METHODS
from rest_framework_json_api import filters as jsonapi_filters
from rest_framework_json_api.views import RelationshipView, Response
from rest_framework_simplejwt.exceptions import InvalidToken, TokenError
from tasks.beat import schedule_provider_scan
from tasks.jobs.attack_paths import db_utils as attack_paths_db_utils
from tasks.jobs.export import get_s3_client
from tasks.tasks import (
    aggregate_finding_group_summaries_task,
    backfill_compliance_summaries_task,
    backfill_scan_resource_summaries_task,
    check_integration_connection_task,
@@ -101,6 +100,7 @@ from api.attack_paths import database as graph_database
from api.attack_paths import get_queries_for_provider, get_query_by_id
from api.attack_paths import views_helpers as attack_paths_views_helpers
from api.base_views import BaseRLSViewSet, BaseTenantViewset, BaseUserViewset
from api.renderers import APIJSONRenderer, PlainTextRenderer
from api.compliance import (
    PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE,
    get_compliance_frameworks,
@@ -199,7 +199,6 @@ from api.models import (
)
from api.pagination import ComplianceOverviewPagination
from api.rbac.permissions import Permissions, get_providers, get_role
from api.renderers import APIJSONRenderer, PlainTextRenderer
from api.rls import Tenant
from api.utils import (
    CustomOAuth2Client,
@@ -409,7 +408,7 @@ class SchemaView(SpectacularAPIView):

    def get(self, request, *args, **kwargs):
        spectacular_settings.TITLE = "Prowler API"
        spectacular_settings.VERSION = "1.23.0"
        spectacular_settings.VERSION = "1.21.0"
        spectacular_settings.DESCRIPTION = (
            "Prowler API specification.\n\nThis file is auto-generated."
        )
@@ -2453,11 +2452,6 @@ class AttackPathsScanViewSet(BaseRLSViewSet):
    # RBAC required permissions
    required_permissions = [Permissions.MANAGE_SCANS]

    def get_throttles(self):
        if self.action == "run_custom_attack_paths_query":
            self.throttle_scope = "attack-paths-custom-query"
        return super().get_throttles()

    def set_required_permissions(self):
        if self.request.method in SAFE_METHODS:
            self.required_permissions = []
@@ -6727,25 +6721,10 @@ class MuteRuleViewSet(BaseRLSViewSet):
        )

        # Launch background task for historical muting
        latest_scan_id = (
            Scan.objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
            .order_by("-completed_at", "-inserted_at")
            .values_list("id", flat=True)
            .first()
        )

        transaction.on_commit(
            lambda: chain(
                mute_historical_findings_task.si(
                    tenant_id=tenant_id,
                    mute_rule_id=str(mute_rule.id),
                ),
                aggregate_finding_group_summaries_task.si(
                    tenant_id=tenant_id,
                    scan_id=str(latest_scan_id),
                ),
            ).apply_async()
        )
        with transaction.atomic():
            mute_historical_findings_task.apply_async(
                kwargs={"tenant_id": tenant_id, "mute_rule_id": str(mute_rule.id)}
            )

        # Return the created mute rule
        serializer = self.get_serializer(mute_rule)
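The `transaction.on_commit` + `chain` variant above defers task dispatch until the database transaction commits, so the workers never race a row that is not yet visible. A minimal sketch of the pattern, reusing the task names imported elsewhere in this diff:

```python
from celery import chain
from django.db import transaction

from tasks.tasks import (
    aggregate_finding_group_summaries_task,
    mute_historical_findings_task,
)


def launch_after_commit(tenant_id: str, mute_rule_id: str, scan_id: str) -> None:
    # .si() builds immutable signatures: the aggregation task runs after the
    # muting task but ignores its return value.
    transaction.on_commit(
        lambda: chain(
            mute_historical_findings_task.si(
                tenant_id=tenant_id, mute_rule_id=mute_rule_id
            ),
            aggregate_finding_group_summaries_task.si(
                tenant_id=tenant_id, scan_id=scan_id
            ),
        ).apply_async()
    )
```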
@@ -6793,29 +6772,13 @@ class FindingGroupViewSet(BaseRLSViewSet):
    queryset = FindingGroupDailySummary.objects.all()
    serializer_class = FindingGroupSerializer
    filterset_class = FindingGroupSummaryFilter
    filter_backends = [
        jsonapi_filters.QueryParameterValidationFilter,
        jsonapi_filters.OrderingFilter,
        CustomDjangoFilterBackend,
    ]
    http_method_names = ["get"]
    required_permissions = []

    def get_filterset_class(self):
        """Return the filterset class used for schema generation and the list action.

        Note: The resources and latest_resources actions do not use this method
        at runtime. They manually instantiate FindingGroupFilter /
        LatestFindingGroupFilter against a Finding queryset (see
        _get_finding_queryset). The class returned here for those actions only
        affects the OpenAPI schema generated by drf-spectacular.
        """
        """Return appropriate filter based on action."""
        if self.action == "latest":
            return LatestFindingGroupSummaryFilter
        if self.action == "resources":
            return FindingGroupFilter
        if self.action == "latest_resources":
            return LatestFindingGroupFilter
        return FindingGroupSummaryFilter

    def get_queryset(self):
@@ -7227,15 +7190,13 @@ class FindingGroupViewSet(BaseRLSViewSet):
            raise ValidationError(filterset.errors)
        filtered_queryset = filterset.qs

        # Keep only the latest row per (check_id, provider), then aggregate by check_id.
        latest_per_check_ids = (
            filtered_queryset.order_by("check_id", "provider_id", "-inserted_at")
            .distinct("check_id", "provider_id")
            .values("id")
        )
        latest_per_check = filtered_queryset.filter(
            id__in=Subquery(latest_per_check_ids)
        )
        # Keep only rows from the latest inserted_at date per check_id
        latest_per_check = filtered_queryset.annotate(
            latest_inserted_at=Window(
                expression=Max("inserted_at"),
                partition_by=[F("check_id")],
            )
        ).filter(inserted_at=F("latest_inserted_at"))

        # Re-aggregate daily summaries
        aggregated_queryset = self._aggregate_daily_summaries(latest_per_check)
@@ -7271,7 +7232,6 @@ class FindingGroupViewSet(BaseRLSViewSet):
        and timing information including how long they have been failing.
        """,
        tags=["Finding Groups"],
        filters=True,
    )
    @action(detail=True, methods=["get"], url_path="resources")
    def resources(self, request, pk=None):
@@ -7346,7 +7306,6 @@ class FindingGroupViewSet(BaseRLSViewSet):
        and timing information. No date filters required.
        """,
        tags=["Finding Groups"],
        filters=True,
    )
    @action(
        detail=False,

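The hunk above contrasts two ways of keeping only the newest summary rows. A side-by-side sketch against a stand-in queryset `qs` (the model and filter state are assumptions):

```python
from django.db.models import F, Max, Subquery, Window

# Variant 1: one latest row per (check_id, provider) via Postgres
# DISTINCT ON, then re-filter by the selected ids.
latest_ids = (
    qs.order_by("check_id", "provider_id", "-inserted_at")
    .distinct("check_id", "provider_id")
    .values("id")
)
latest_per_check = qs.filter(id__in=Subquery(latest_ids))

# Variant 2: keep every row whose inserted_at equals the per-check maximum,
# computed with a window function (no provider dimension).
latest_per_check = qs.annotate(
    latest_inserted_at=Window(
        expression=Max("inserted_at"),
        partition_by=[F("check_id")],
    )
).filter(inserted_at=F("latest_inserted_at"))
```

Variant 1 keeps the newest row for each provider separately; variant 2 keeps only rows matching each check's single newest timestamp, which drops providers whose latest summary is older.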
@@ -113,11 +113,8 @@ REST_FRAMEWORK = {
        "rest_framework.throttling.ScopedRateThrottle",
    ],
    "DEFAULT_THROTTLE_RATES": {
        "dj_rest_auth": None,
        "token-obtain": env("DJANGO_THROTTLE_TOKEN_OBTAIN", default=None),
        "attack-paths-custom-query": env(
            "DJANGO_THROTTLE_ATTACK_PATHS_CUSTOM_QUERY", default="10/min"
        ),
        "dj_rest_auth": None,
    },
}


@@ -3,10 +3,6 @@ from config.env import env

DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
CORS_ALLOWED_ORIGINS = env.list(
    "DJANGO_CORS_ALLOWED_ORIGINS",
    default=["http://localhost", "http://127.0.0.1"],
)

# Database
# TODO Use Django database routers https://docs.djangoproject.com/en/5.0/topics/db/multi-db/#automatic-database-routing

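Both throttle rates above are environment-driven, so a deployment can tune them without code changes; illustrative values only:

```env
DJANGO_THROTTLE_ATTACK_PATHS_CUSTOM_QUERY="5/min"
DJANGO_THROTTLE_TOKEN_OBTAIN="30/min"
```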
@@ -1,30 +1,25 @@
from dataclasses import dataclass
from typing import Callable
from uuid import UUID

from config.env import env

from tasks.jobs.attack_paths import aws

# Batch size for Neo4j write operations (resource labeling, cleanup)

# Batch size for Neo4j operations
BATCH_SIZE = env.int("ATTACK_PATHS_BATCH_SIZE", 1000)
# Batch size for Postgres findings fetch (keyset pagination page size)
FINDINGS_BATCH_SIZE = env.int("ATTACK_PATHS_FINDINGS_BATCH_SIZE", 500)
# Batch size for temp-to-tenant graph sync (nodes and relationships per cursor page)
SYNC_BATCH_SIZE = env.int("ATTACK_PATHS_SYNC_BATCH_SIZE", 250)

# Neo4j internal labels (Prowler-specific, not provider-specific)
# - `Internet`: Singleton node representing external internet access for exposed-resource queries
# - `ProwlerFinding`: Label for finding nodes created by Prowler and linked to cloud resources
# - `_ProviderResource`: Added to ALL synced nodes for provider isolation and drop/query ops
INTERNET_NODE_LABEL = "Internet"
# - `Internet`: Singleton node representing external internet access for exposed-resource queries
PROWLER_FINDING_LABEL = "ProwlerFinding"
PROVIDER_RESOURCE_LABEL = "_ProviderResource"
INTERNET_NODE_LABEL = "Internet"

# Dynamic isolation labels that contain entity UUIDs and are added to every synced node during sync
# Format: _Tenant_{uuid_no_hyphens}, _Provider_{uuid_no_hyphens}
TENANT_LABEL_PREFIX = "_Tenant_"
PROVIDER_LABEL_PREFIX = "_Provider_"
DYNAMIC_ISOLATION_PREFIXES = [TENANT_LABEL_PREFIX, PROVIDER_LABEL_PREFIX]
# Phase 1 dual-write: deprecated label kept for drop_subgraph and infrastructure queries
# Remove in Phase 2 once all nodes use the private label exclusively
DEPRECATED_PROVIDER_RESOURCE_LABEL = "ProviderResource"


@dataclass(frozen=True)
@@ -36,6 +31,7 @@ class ProviderConfig:
    uid_field: str  # e.g., "arn"
    # Label for resources connected to the account node, enabling indexed finding lookups.
    resource_label: str  # e.g., "_AWSResource"
    deprecated_resource_label: str  # e.g., "AWSResource"
    ingestion_function: Callable


@@ -47,6 +43,7 @@ AWS_CONFIG = ProviderConfig(
    root_node_label="AWSAccount",
    uid_field="arn",
    resource_label="_AWSResource",
    deprecated_resource_label="AWSResource",
    ingestion_function=aws.start_aws_ingestion,
)

@@ -59,16 +56,18 @@ PROVIDER_CONFIGS: dict[str, ProviderConfig] = {
INTERNAL_LABELS: list[str] = [
    "Tenant",  # From Cartography, but it looks like it's ours
    PROVIDER_RESOURCE_LABEL,
    DEPRECATED_PROVIDER_RESOURCE_LABEL,
    # Add all provider-specific resource labels
    *[config.resource_label for config in PROVIDER_CONFIGS.values()],
    *[config.deprecated_resource_label for config in PROVIDER_CONFIGS.values()],
]

# Provider isolation properties
PROVIDER_ID_PROPERTY = "_provider_id"
PROVIDER_ELEMENT_ID_PROPERTY = "_provider_element_id"

PROVIDER_ISOLATION_PROPERTIES: list[str] = [
    PROVIDER_ID_PROPERTY,
    PROVIDER_ELEMENT_ID_PROPERTY,
    "_provider_id",
    "_provider_element_id",
    "provider_id",
    "provider_element_id",
]

# Cartography bookkeeping metadata
@@ -118,25 +117,7 @@ def get_provider_resource_label(provider_type: str) -> str:
    return config.resource_label if config else "_UnknownProviderResource"


# Dynamic Isolation Label Helpers
# --------------------------------


def _normalize_uuid(value: str | UUID) -> str:
    """Strip hyphens from a UUID string for use in Neo4j labels."""
    return str(value).replace("-", "")


def get_tenant_label(tenant_id: str | UUID) -> str:
    """Get the Neo4j label for a tenant (e.g., `_Tenant_019c41ee7df37deca684d839f95619f8`)."""
    return f"{TENANT_LABEL_PREFIX}{_normalize_uuid(tenant_id)}"


def get_provider_label(provider_id: str | UUID) -> str:
    """Get the Neo4j label for a provider (e.g., `_Provider_019c41ee7df37deca684d839f95619f8`)."""
    return f"{PROVIDER_LABEL_PREFIX}{_normalize_uuid(provider_id)}"


def is_dynamic_isolation_label(label: str) -> bool:
    """Check if a label is a dynamic tenant/provider isolation label."""
    return any(label.startswith(prefix) for prefix in DYNAMIC_ISOLATION_PREFIXES)
def get_deprecated_provider_resource_label(provider_type: str) -> str:
    """Get the deprecated resource label for a provider type (e.g., `AWSResource`)."""
    config = PROVIDER_CONFIGS.get(provider_type)
    return config.deprecated_resource_label if config else "UnknownProviderResource"

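Illustrative use of the dynamic-isolation helpers defined above (the UUIDs are made up):

```python
tenant_label = get_tenant_label("019c41ee-7df3-7dec-a684-d839f95619f8")
assert tenant_label == "_Tenant_019c41ee7df37deca684d839f95619f8"
assert is_dynamic_isolation_label(tenant_label)

provider_label = get_provider_label("0f0e0d0c-0b0a-4908-8706-050403020100")
# Both labels can then be combined in a Cypher MATCH for strict scoping:
match_clause = f"MATCH (n:`{tenant_label}`:`{provider_label}`) RETURN count(n)"
```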
@@ -3,13 +3,15 @@ from typing import Any

from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger
from tasks.jobs.attack_paths.config import is_provider_available

from api.attack_paths import database as graph_database
from api.db_utils import rls_transaction
from api.models import AttackPathsScan as ProwlerAPIAttackPathsScan
from api.models import Provider as ProwlerAPIProvider
from api.models import StateChoices
from api.models import (
    AttackPathsScan as ProwlerAPIAttackPathsScan,
    Provider as ProwlerAPIProvider,
    StateChoices,
)
from tasks.jobs.attack_paths.config import is_provider_available

logger = get_task_logger(__name__)

@@ -153,37 +155,6 @@ def set_provider_graph_data_ready(
    attack_paths_scan.refresh_from_db(fields=["graph_data_ready"])


def recover_graph_data_ready(
    attack_paths_scan: ProwlerAPIAttackPathsScan,
) -> None:
    """
    Best-effort recovery of `graph_data_ready` after a scan failure.

    Queries Neo4j to check if the provider still has data in the tenant
    database. If data exists, restores `graph_data_ready=True` for all scans
    of this provider. Never raises.

    Trade-off: if the worker crashed mid-sync, partial data may exist and
    this will re-enable queries against it. We accept that because leaving
    `graph_data_ready=False` permanently (blocking all queries until the
    next successful scan) is a worse outcome for the user.
    """
    try:
        tenant_db = graph_database.get_database_name(attack_paths_scan.tenant_id)
        if graph_database.has_provider_data(
            tenant_db, str(attack_paths_scan.provider_id)
        ):
            set_provider_graph_data_ready(attack_paths_scan, True)
            logger.info(
                f"Recovered `graph_data_ready` for provider {attack_paths_scan.provider_id}"
            )

    except Exception:
        logger.exception(
            f"Failed to recover `graph_data_ready` for provider {attack_paths_scan.provider_id}"
        )


def fail_attack_paths_scan(
    tenant_id: str,
    scan_id: str,
@@ -214,5 +185,3 @@ def fail_attack_paths_scan(
        StateChoices.FAILED,
        {"global_error": error},
    )

    recover_graph_data_ready(attack_paths_scan)

@@ -9,15 +9,23 @@ This module handles:
"""

from collections import defaultdict
from dataclasses import asdict, dataclass, fields
from typing import Any, Generator
from uuid import UUID

import neo4j

from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger

from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import Finding as FindingModel
from api.models import Provider, ResourceFindingMapping
from prowler.config import config as ProwlerConfig
from tasks.jobs.attack_paths.config import (
    BATCH_SIZE,
    FINDINGS_BATCH_SIZE,
    get_deprecated_provider_resource_label,
    get_node_uid_field,
    get_provider_resource_label,
    get_root_node_label,
@@ -30,54 +38,75 @@ from tasks.jobs.attack_paths.queries import (
    render_cypher_template,
)

from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import Finding as FindingModel
from api.models import Provider, ResourceFindingMapping
from prowler.config import config as ProwlerConfig

logger = get_task_logger(__name__)


# Django ORM field names for `.values()` queries
# Most map 1:1 to Neo4j property names, exceptions are remapped in `_to_neo4j_dict`
_DB_QUERY_FIELDS = [
    "id",
    "uid",
    "inserted_at",
    "updated_at",
    "first_seen_at",
    "scan_id",
    "delta",
    "status",
    "status_extended",
    "severity",
    "check_id",
    "check_metadata__checktitle",
    "muted",
    "muted_reason",
]
# Type Definitions
# -----------------

# Maps dataclass field names to Django ORM query field names
_DB_FIELD_MAP: dict[str, str] = {
    "check_title": "check_metadata__checktitle",
}


def _to_neo4j_dict(record: dict[str, Any], resource_uid: str) -> dict[str, Any]:
    """Transform a Django `.values()` record into a `dict` ready for Neo4j ingestion."""
    return {
        "id": str(record["id"]),
        "uid": record["uid"],
        "inserted_at": record["inserted_at"],
        "updated_at": record["updated_at"],
        "first_seen_at": record["first_seen_at"],
        "scan_id": str(record["scan_id"]),
        "delta": record["delta"],
        "status": record["status"],
        "status_extended": record["status_extended"],
        "severity": record["severity"],
        "check_id": str(record["check_id"]),
        "check_title": record["check_metadata__checktitle"],
        "muted": record["muted"],
        "muted_reason": record["muted_reason"],
        "resource_uid": resource_uid,
    }
@dataclass(slots=True)
class Finding:
    """
    Finding data for Neo4j ingestion.

    Can be created from a Django .values() query result using from_db_record().
    """

    id: str
    uid: str
    inserted_at: str
    updated_at: str
    first_seen_at: str
    scan_id: str
    delta: str
    status: str
    status_extended: str
    severity: str
    check_id: str
    check_title: str
    muted: bool
    muted_reason: str | None
    resource_uid: str | None = None

    @classmethod
    def get_db_query_fields(cls) -> tuple[str, ...]:
        """Get field names for Django .values() query."""
        return tuple(
            _DB_FIELD_MAP.get(f.name, f.name)
            for f in fields(cls)
            if f.name != "resource_uid"
        )

    @classmethod
    def from_db_record(cls, record: dict[str, Any], resource_uid: str) -> "Finding":
        """Create a Finding from a Django .values() query result."""
        return cls(
            id=str(record["id"]),
            uid=record["uid"],
            inserted_at=record["inserted_at"],
            updated_at=record["updated_at"],
            first_seen_at=record["first_seen_at"],
            scan_id=str(record["scan_id"]),
            delta=record["delta"],
            status=record["status"],
            status_extended=record["status_extended"],
            severity=record["severity"],
            check_id=str(record["check_id"]),
            check_title=record["check_metadata__checktitle"],
            muted=record["muted"],
            muted_reason=record["muted_reason"],
            resource_uid=resource_uid,
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert to dict for Neo4j ingestion."""
        return asdict(self)


# Public API
@@ -124,6 +153,9 @@ def add_resource_label(
        {
            "__ROOT_LABEL__": get_root_node_label(provider_type),
            "__RESOURCE_LABEL__": get_provider_resource_label(provider_type),
            "__DEPRECATED_RESOURCE_LABEL__": get_deprecated_provider_resource_label(
                provider_type
            ),
        },
    )

@@ -152,7 +184,7 @@ def add_resource_label(

def load_findings(
    neo4j_session: neo4j.Session,
    findings_batches: Generator[list[dict[str, Any]], None, None],
    findings_batches: Generator[list[Finding], None, None],
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
@@ -181,7 +213,7 @@ def load_findings(
        batch_size = len(batch)
        total_records += batch_size

        parameters["findings_data"] = batch
        parameters["findings_data"] = [f.to_dict() for f in batch]

        logger.info(f"Loading findings batch {batch_num} ({batch_size} records)")
        neo4j_session.run(query, parameters)
@@ -219,17 +251,16 @@ def cleanup_findings(
def stream_findings_with_resources(
    prowler_api_provider: Provider,
    scan_id: str,
) -> Generator[list[dict[str, Any]], None, None]:
) -> Generator[list[Finding], None, None]:
    """
    Stream findings with their associated resources in batches.

    Uses keyset pagination for efficient traversal of large datasets.
    Memory efficient: yields one batch at a time as dicts ready for Neo4j ingestion,
    never holds all findings in memory.
    Memory efficient: yields one batch at a time, never holds all findings in memory.
    """
    logger.info(
        f"Starting findings stream for scan {scan_id} "
        f"(tenant {prowler_api_provider.tenant_id}) with batch size {FINDINGS_BATCH_SIZE}"
        f"(tenant {prowler_api_provider.tenant_id}) with batch size {BATCH_SIZE}"
    )

    tenant_id = prowler_api_provider.tenant_id
@@ -278,14 +309,15 @@ def _fetch_findings_batch(
    Uses read replica and RLS-scoped transaction.
    """
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        # Use `all_objects` to get `Findings` even on soft-deleted `Providers`
        # But even the provider is already validated as active in this context
        # Use all_objects to avoid the ActiveProviderManager's implicit JOIN
        # through Scan -> Provider (to check is_deleted=False).
        # The provider is already validated as active in this context.
        qs = FindingModel.all_objects.filter(scan_id=scan_id).order_by("id")

        if after_id is not None:
            qs = qs.filter(id__gt=after_id)

        return list(qs.values(*_DB_QUERY_FIELDS)[:FINDINGS_BATCH_SIZE])
        return list(qs.values(*Finding.get_db_query_fields())[:BATCH_SIZE])


# Batch Enrichment
@@ -295,7 +327,7 @@ def _fetch_findings_batch(
def _enrich_batch_with_resources(
    findings_batch: list[dict[str, Any]],
    tenant_id: str,
) -> list[dict[str, Any]]:
) -> list[Finding]:
    """
    Enrich findings with their resource UIDs.

@@ -306,7 +338,7 @@ def _enrich_batch_with_resources(
    resource_map = _build_finding_resource_map(finding_ids, tenant_id)

    return [
        _to_neo4j_dict(finding, resource_uid)
        Finding.from_db_record(finding, resource_uid)
        for finding in findings_batch
        for resource_uid in resource_map.get(finding["id"], [])
    ]

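The streaming described above boils down to keyset pagination: each page resumes strictly after the last primary key seen, so there is no `OFFSET` to rescan. A condensed sketch of that loop (page size and batching details simplified, RLS and read-replica handling omitted):

```python
def iter_finding_batches(scan_id: str, page_size: int = 500):
    # Keyset-pagination sketch mirroring stream_findings_with_resources /
    # _fetch_findings_batch above.
    after_id = None
    while True:
        qs = FindingModel.all_objects.filter(scan_id=scan_id).order_by("id")
        if after_id is not None:
            qs = qs.filter(id__gt=after_id)  # resume after the last id seen
        batch = list(qs[:page_size])
        if not batch:
            return
        after_id = batch[-1].id
        yield batch
```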
@@ -6,10 +6,9 @@ from cartography.client.core.tx import run_write_query
from celery.utils.log import get_task_logger

from tasks.jobs.attack_paths.config import (
    DEPRECATED_PROVIDER_RESOURCE_LABEL,
    INTERNET_NODE_LABEL,
    PROWLER_FINDING_LABEL,
    PROVIDER_ELEMENT_ID_PROPERTY,
    PROVIDER_ID_PROPERTY,
    PROVIDER_RESOURCE_LABEL,
)

@@ -28,6 +27,8 @@ FINDINGS_INDEX_STATEMENTS = [
    # Resource indexes for Prowler Finding lookups
    "CREATE INDEX aws_resource_arn IF NOT EXISTS FOR (n:_AWSResource) ON (n.arn);",
    "CREATE INDEX aws_resource_id IF NOT EXISTS FOR (n:_AWSResource) ON (n.id);",
    "CREATE INDEX deprecated_aws_resource_arn IF NOT EXISTS FOR (n:AWSResource) ON (n.arn);",
    "CREATE INDEX deprecated_aws_resource_id IF NOT EXISTS FOR (n:AWSResource) ON (n.id);",
    # Prowler Finding indexes
    f"CREATE INDEX prowler_finding_id IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.id);",
    f"CREATE INDEX prowler_finding_provider_uid IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.provider_uid);",
@@ -39,8 +40,10 @@ FINDINGS_INDEX_STATEMENTS = [

# Indexes for provider resource sync operations
SYNC_INDEX_STATEMENTS = [
    f"CREATE INDEX provider_resource_element_id IF NOT EXISTS FOR (n:{PROVIDER_RESOURCE_LABEL}) ON (n.{PROVIDER_ELEMENT_ID_PROPERTY});",
    f"CREATE INDEX provider_resource_provider_id IF NOT EXISTS FOR (n:{PROVIDER_RESOURCE_LABEL}) ON (n.{PROVIDER_ID_PROPERTY});",
    f"CREATE INDEX provider_element_id IF NOT EXISTS FOR (n:{PROVIDER_RESOURCE_LABEL}) ON (n._provider_element_id);",
    f"CREATE INDEX provider_resource_provider_id IF NOT EXISTS FOR (n:{PROVIDER_RESOURCE_LABEL}) ON (n._provider_id);",
    f"CREATE INDEX deprecated_provider_element_id IF NOT EXISTS FOR (n:{DEPRECATED_PROVIDER_RESOURCE_LABEL}) ON (n.provider_element_id);",
    f"CREATE INDEX deprecated_provider_resource_provider_id IF NOT EXISTS FOR (n:{DEPRECATED_PROVIDER_RESOURCE_LABEL}) ON (n.provider_id);",
]



@@ -2,8 +2,6 @@
from tasks.jobs.attack_paths.config import (
    INTERNET_NODE_LABEL,
    PROWLER_FINDING_LABEL,
    PROVIDER_ELEMENT_ID_PROPERTY,
    PROVIDER_ID_PROPERTY,
    PROVIDER_RESOURCE_LABEL,
)

@@ -28,7 +26,7 @@ ADD_RESOURCE_LABEL_TEMPLATE = """
MATCH (account:__ROOT_LABEL__ {id: $provider_uid})-->(r)
WHERE NOT r:__ROOT_LABEL__ AND NOT r:__RESOURCE_LABEL__
WITH r LIMIT $batch_size
SET r:__RESOURCE_LABEL__
SET r:__RESOURCE_LABEL__:__DEPRECATED_RESOURCE_LABEL__
RETURN COUNT(r) AS labeled_count
"""

@@ -151,18 +149,22 @@ RELATIONSHIPS_FETCH_QUERY = """
LIMIT $batch_size
"""

NODE_SYNC_TEMPLATE = f"""
NODE_SYNC_TEMPLATE = """
UNWIND $rows AS row
MERGE (n:__NODE_LABELS__ {{{PROVIDER_ELEMENT_ID_PROPERTY}: row.provider_element_id}})
MERGE (n:__NODE_LABELS__ {_provider_element_id: row.provider_element_id})
SET n += row.props
SET n.{PROVIDER_ID_PROPERTY} = $provider_id
"""
SET n._provider_id = $provider_id
SET n.provider_element_id = row.provider_element_id
SET n.provider_id = $provider_id
"""  # The last two lines are deprecated properties

RELATIONSHIP_SYNC_TEMPLATE = f"""
UNWIND $rows AS row
MATCH (s:{PROVIDER_RESOURCE_LABEL} {{{PROVIDER_ELEMENT_ID_PROPERTY}: row.start_element_id}})
MATCH (t:{PROVIDER_RESOURCE_LABEL} {{{PROVIDER_ELEMENT_ID_PROPERTY}: row.end_element_id}})
MERGE (s)-[r:__REL_TYPE__ {{{PROVIDER_ELEMENT_ID_PROPERTY}: row.provider_element_id}}]->(t)
MATCH (s:{PROVIDER_RESOURCE_LABEL} {{_provider_element_id: row.start_element_id}})
MATCH (t:{PROVIDER_RESOURCE_LABEL} {{_provider_element_id: row.end_element_id}})
MERGE (s)-[r:__REL_TYPE__ {{_provider_element_id: row.provider_element_id}}]->(t)
SET r += row.props
SET r.{PROVIDER_ID_PROPERTY} = $provider_id
"""
SET r._provider_id = $provider_id
SET r.provider_element_id = row.provider_element_id
SET r.provider_id = $provider_id
"""  # The last two lines are deprecated properties

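The `__NODE_LABELS__` and `__REL_TYPE__` placeholders above are substituted by `render_cypher_template`, whose implementation is not part of this diff; a plausible minimal version is plain string replacement:

```python
# Assumption: render_cypher_template is simple placeholder substitution.
def render_cypher_template(template: str, replacements: dict[str, str]) -> str:
    for placeholder, value in replacements.items():
        template = template.replace(placeholder, value)
    return template


query = render_cypher_template(
    NODE_SYNC_TEMPLATE,
    {"__NODE_LABELS__": "`_ProviderResource`:`AWSAccount`"},
)
```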
@@ -55,6 +55,7 @@ exception propagates to Celery.

import logging
import time

from typing import Any

from cartography.config import Config as CartographyConfig
@@ -62,14 +63,16 @@ from cartography.intel import analysis as cartography_analysis
from cartography.intel import create_indexes as cartography_create_indexes
from cartography.intel import ontology as cartography_ontology
from celery.utils.log import get_task_logger
from tasks.jobs.attack_paths import db_utils, findings, internet, sync, utils
from tasks.jobs.attack_paths.config import get_cartography_ingestion_function

from api.attack_paths import database as graph_database
from api.db_utils import rls_transaction
from api.models import Provider as ProwlerAPIProvider
from api.models import StateChoices
from api.models import (
    Provider as ProwlerAPIProvider,
    StateChoices,
)
from api.utils import initialize_prowler_provider
from tasks.jobs.attack_paths import db_utils, findings, internet, sync, utils
from tasks.jobs.attack_paths.config import get_cartography_ingestion_function

# Without this Celery goes crazy with Cartography logging
logging.getLogger("cartography").setLevel(logging.ERROR)
@@ -144,10 +147,6 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        attack_paths_scan, task_id, tenant_cartography_config
    )

    subgraph_dropped = False
    sync_completed = False
    provider_gated = False

    try:
        logger.info(
            f"Creating Neo4j database {tmp_cartography_config.neo4j_database} for tenant {prowler_api_provider.tenant_id}"
@@ -226,12 +225,10 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:

        logger.info(f"Deleting existing provider graph in {tenant_database_name}")
        db_utils.set_provider_graph_data_ready(attack_paths_scan, False)
        provider_gated = True
        graph_database.drop_subgraph(
            database=tenant_database_name,
            provider_id=str(prowler_api_provider.id),
        )
        subgraph_dropped = True
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 98)

        logger.info(
@@ -240,10 +237,8 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        sync.sync_graph(
            source_database=tmp_database_name,
            target_database=tenant_database_name,
            tenant_id=str(prowler_api_provider.tenant_id),
            provider_id=str(prowler_api_provider.id),
        )
        sync_completed = True
        db_utils.set_graph_data_ready(attack_paths_scan, True)
        db_utils.update_attack_paths_scan_progress(attack_paths_scan, 99)

@@ -268,39 +263,23 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        logger.exception(exception_message)
        ingestion_exceptions["global_error"] = exception_message

        # Recover graph_data_ready based on how far the swap got.
        # Partial drop (mid-batch failure) may leave `subgraph_dropped=False`
        # with data partially deleted, so we prefer that over permanently blocked queries.
        try:
            if sync_completed:
                db_utils.set_graph_data_ready(attack_paths_scan, True)
            elif provider_gated and not subgraph_dropped:
                db_utils.set_provider_graph_data_ready(attack_paths_scan, True)

        except Exception:
            logger.error(
                f"Failed to recover `graph_data_ready` for provider {attack_paths_scan.provider_id}",
                exc_info=True,
            )

        # Dropping the temporary database if it still exists
        # Handling databases changes
        try:
            graph_database.drop_database(tmp_cartography_config.neo4j_database)

        except Exception as e:
            logger.error(
                f"Failed to drop temporary Neo4j database `{tmp_cartography_config.neo4j_database}` during cleanup: {e}",
                f"Failed to drop temporary Neo4j database {tmp_cartography_config.neo4j_database} during cleanup: {e}",
                exc_info=True,
            )

        # Set Attack Paths scan state to FAILED
        try:
            db_utils.finish_attack_paths_scan(
                attack_paths_scan, StateChoices.FAILED, ingestion_exceptions
            )
        except Exception as e:
            logger.error(
                f"Could not mark Attack Paths scan {attack_paths_scan.id} as `FAILED` (row may have been deleted): {e}",
                f"Could not mark attack paths scan {attack_paths_scan.id} as FAILED (row may have been deleted): {e}",
                exc_info=True,
            )


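The three booleans threaded through `run()` above encode how far the drop-and-swap got before an exception; a compact sketch of the recovery decision they drive:

```python
def recovery_action(
    sync_completed: bool, provider_gated: bool, subgraph_dropped: bool
) -> str:
    """Sketch of the except-branch recovery logic in run() above."""
    if sync_completed:
        # New graph fully swapped in: safe to re-open queries for the scan.
        return "set_graph_data_ready(True)"
    if provider_gated and not subgraph_dropped:
        # Gated the provider but the old subgraph drop did not complete; its
        # data may be partial, a trade-off the code accepts over leaving
        # queries permanently blocked.
        return "set_provider_graph_data_ready(True)"
    # Drop finished but sync failed: old data is gone, keep queries blocked
    # until the next successful scan.
    return "leave graph_data_ready=False"
```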
@@ -8,16 +8,14 @@ to the tenant database, adding provider isolation labels and properties.
from collections import defaultdict
from typing import Any

import neo4j
from celery.utils.log import get_task_logger

from api.attack_paths import database as graph_database
from tasks.jobs.attack_paths.config import (
    BATCH_SIZE,
    DEPRECATED_PROVIDER_RESOURCE_LABEL,
    PROVIDER_ISOLATION_PROPERTIES,
    PROVIDER_RESOURCE_LABEL,
    SYNC_BATCH_SIZE,
    get_provider_label,
    get_tenant_label,
)
from tasks.jobs.attack_paths.indexes import IndexType, create_indexes
from tasks.jobs.attack_paths.queries import (
@@ -39,7 +37,6 @@ def create_sync_indexes(neo4j_session) -> None:
def sync_graph(
    source_database: str,
    target_database: str,
    tenant_id: str,
    provider_id: str,
) -> dict[str, int]:
    """
@@ -48,7 +45,6 @@
    Args:
        `source_database`: The temporary scan database
        `target_database`: The tenant database
        `tenant_id`: The tenant ID for isolation
        `provider_id`: The provider ID for isolation

    Returns:
@@ -57,7 +53,6 @@
    nodes_synced = sync_nodes(
        source_database,
        target_database,
        tenant_id,
        provider_id,
    )
    relationships_synced = sync_relationships(
@@ -75,45 +70,50 @@
def sync_nodes(
    source_database: str,
    target_database: str,
    tenant_id: str,
    provider_id: str,
) -> int:
    """
    Sync nodes from source to target database.

    Adds `_ProviderResource` label and `_provider_id` property to all nodes.
    Also adds dynamic `_Tenant_{id}` and `_Provider_{id}` isolation labels.

    Source and target sessions are opened sequentially per batch to avoid
    holding two Bolt connections simultaneously for the entire sync duration.
    """
    last_id = -1
    total_synced = 0

    while True:
        grouped: dict[tuple[str, ...], list[dict[str, Any]]] = defaultdict(list)
        batch_count = 0

        with graph_database.get_session(source_database) as source_session:
            result = source_session.run(
                NODE_FETCH_QUERY,
                {"last_id": last_id, "batch_size": SYNC_BATCH_SIZE},
    with (
        graph_database.get_session(source_database) as source_session,
        graph_database.get_session(target_database) as target_session,
    ):
        while True:
            rows = list(
                source_session.run(
                    NODE_FETCH_QUERY,
                    {"last_id": last_id, "batch_size": BATCH_SIZE},
                )
            )
            for record in result:
                batch_count += 1
                last_id = record["internal_id"]
                key, value = _node_to_sync_dict(record, provider_id)
                grouped[key].append(value)

        if batch_count == 0:
            break
            if not rows:
                break

            last_id = rows[-1]["internal_id"]

            grouped: dict[tuple[str, ...], list[dict[str, Any]]] = defaultdict(list)
            for row in rows:
                labels = tuple(sorted(set(row["labels"] or [])))
                props = dict(row["props"] or {})
                _strip_internal_properties(props)
                provider_element_id = f"{provider_id}:{row['element_id']}"
                grouped[labels].append(
                    {
                        "provider_element_id": provider_element_id,
                        "props": props,
                    }
                )

        with graph_database.get_session(target_database) as target_session:
            for labels, batch in grouped.items():
                label_set = set(labels)
                label_set.add(PROVIDER_RESOURCE_LABEL)
                label_set.add(get_tenant_label(tenant_id))
                label_set.add(get_provider_label(provider_id))
                label_set.add(DEPRECATED_PROVIDER_RESOURCE_LABEL)
                node_labels = ":".join(f"`{label}`" for label in sorted(label_set))

                query = render_cypher_template(
@@ -127,10 +127,10 @@ def sync_nodes(
                    },
                )

        total_synced += batch_count
        logger.info(
            f"Synced {total_synced} nodes from {source_database} to {target_database}"
        )
            total_synced += len(rows)
            logger.info(
                f"Synced {total_synced} nodes from {source_database} to {target_database}"
            )

    return total_synced

@@ -144,32 +144,41 @@ def sync_relationships(
    Sync relationships from source to target database.

    Adds `_provider_id` property to all relationships.

    Source and target sessions are opened sequentially per batch to avoid
    holding two Bolt connections simultaneously for the entire sync duration.
    """
    last_id = -1
    total_synced = 0

    while True:
        grouped: dict[str, list[dict[str, Any]]] = defaultdict(list)
        batch_count = 0

        with graph_database.get_session(source_database) as source_session:
            result = source_session.run(
                RELATIONSHIPS_FETCH_QUERY,
                {"last_id": last_id, "batch_size": SYNC_BATCH_SIZE},
    with (
        graph_database.get_session(source_database) as source_session,
        graph_database.get_session(target_database) as target_session,
    ):
        while True:
            rows = list(
                source_session.run(
                    RELATIONSHIPS_FETCH_QUERY,
                    {"last_id": last_id, "batch_size": BATCH_SIZE},
                )
            )
            for record in result:
                batch_count += 1
                last_id = record["internal_id"]
                key, value = _rel_to_sync_dict(record, provider_id)
                grouped[key].append(value)

        if batch_count == 0:
            break
            if not rows:
                break

            last_id = rows[-1]["internal_id"]

            grouped: dict[str, list[dict[str, Any]]] = defaultdict(list)
            for row in rows:
                props = dict(row["props"] or {})
                _strip_internal_properties(props)
                rel_type = row["rel_type"]
                grouped[rel_type].append(
                    {
                        "start_element_id": f"{provider_id}:{row['start_element_id']}",
                        "end_element_id": f"{provider_id}:{row['end_element_id']}",
                        "provider_element_id": f"{provider_id}:{rel_type}:{row['internal_id']}",
                        "props": props,
                    }
                )

        with graph_database.get_session(target_database) as target_session:
            for rel_type, batch in grouped.items():
                query = render_cypher_template(
                    RELATIONSHIP_SYNC_TEMPLATE, {"__REL_TYPE__": rel_type}
@@ -182,42 +191,14 @@ def sync_relationships(
                    },
                )

        total_synced += batch_count
        logger.info(
            f"Synced {total_synced} relationships from {source_database} to {target_database}"
        )
            total_synced += len(rows)
            logger.info(
                f"Synced {total_synced} relationships from {source_database} to {target_database}"
            )

    return total_synced


def _node_to_sync_dict(
    record: neo4j.Record, provider_id: str
) -> tuple[tuple[str, ...], dict[str, Any]]:
    """Transform a source node record into a (grouping_key, sync_dict) pair."""
    props = dict(record["props"] or {})
    _strip_internal_properties(props)
    labels = tuple(sorted(set(record["labels"] or [])))
    return labels, {
        "provider_element_id": f"{provider_id}:{record['element_id']}",
        "props": props,
    }


def _rel_to_sync_dict(
    record: neo4j.Record, provider_id: str
) -> tuple[str, dict[str, Any]]:
    """Transform a source relationship record into a (grouping_key, sync_dict) pair."""
    props = dict(record["props"] or {})
    _strip_internal_properties(props)
    rel_type = record["rel_type"]
    return rel_type, {
        "start_element_id": f"{provider_id}:{record['start_element_id']}",
        "end_element_id": f"{provider_id}:{record['end_element_id']}",
        "provider_element_id": f"{provider_id}:{rel_type}:{record['internal_id']}",
        "props": props,
    }


def _strip_internal_properties(props: dict[str, Any]) -> None:
    """Remove provider isolation properties before the += spread in sync templates."""
    for key in PROVIDER_ISOLATION_PROPERTIES:

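For the variant that opens sessions sequentially per batch, the control flow reduces to the following shape (fetch query and write statement simplified; `graph_database` and `NODE_FETCH_QUERY` come from the module above, the write Cypher is illustrative):

```python
def copy_nodes_in_batches(source_db: str, target_db: str, batch_size: int = 250) -> int:
    # One short-lived source session to read a page, then one short-lived
    # target session to write it, so two Bolt connections are never held
    # open simultaneously for the whole sync.
    last_id, total = -1, 0
    while True:
        with graph_database.get_session(source_db) as src:
            page = [
                (record["internal_id"], dict(record["props"] or {}))
                for record in src.run(
                    NODE_FETCH_QUERY, {"last_id": last_id, "batch_size": batch_size}
                )
            ]
        if not page:
            return total
        last_id = page[-1][0]  # keyset cursor for the next page
        with graph_database.get_session(target_db) as dst:
            dst.run(
                "UNWIND $rows AS row CREATE (n) SET n += row",
                {"rows": [props for _, props in page]},
            )
        total += len(page)
```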
@@ -4,7 +4,7 @@ from django.db.models import Count, Q

from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import Finding, Scan, StatusChoices
from api.models import Finding, StatusChoices
from prowler.lib.outputs.finding import Finding as FindingOutput

logger = get_task_logger(__name__)
@@ -35,26 +35,25 @@ def _aggregate_requirement_statistics_from_database(
    }
    """
    requirement_statistics_by_check_id = {}
    # TODO: review when finding-resource relation changes from 1:1
    # TODO: take into account that now the relation is 1 finding == 1 resource, review this when the logic changes
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        # Pre-check: skip if the scan's provider is deleted (avoids JOINs in the main query)
        if Scan.all_objects.filter(id=scan_id, provider__is_deleted=True).exists():
            return requirement_statistics_by_check_id

        aggregated_statistics_queryset = (
            Finding.all_objects.filter(
                tenant_id=tenant_id,
                scan_id=scan_id,
                muted=False,
                resources__provider__is_deleted=False,
            )
            .values("check_id")
            .annotate(
                total_findings=Count(
                    "id",
                    distinct=True,
                    filter=Q(status__in=[StatusChoices.PASS, StatusChoices.FAIL]),
                ),
                passed_findings=Count(
                    "id",
                    distinct=True,
                    filter=Q(status=StatusChoices.PASS),
                ),
            )

File diff suppressed because it is too large
@@ -169,27 +169,35 @@ class TestAggregateRequirementStatistics:
        assert result["check_1"]["passed"] == 1
        assert result["check_1"]["total"] == 1

    def test_skips_aggregation_for_deleted_provider(
        self, tenants_fixture, scans_fixture
    ):
        """Verify aggregation returns empty when the scan's provider is soft-deleted."""
    def test_excludes_findings_without_resources(self, tenants_fixture, scans_fixture):
        """Verify findings without resources are excluded from aggregation."""
        tenant = tenants_fixture[0]
        scan = scans_fixture[0]

        # Finding WITH resource → should be counted
        self._create_finding_with_resource(
            tenant, scan, "finding-1", "check_1", StatusChoices.PASS
        )

        # Soft-delete the provider
        provider = scan.provider
        provider.is_deleted = True
        provider.save(update_fields=["is_deleted"])
        # Finding WITHOUT resource → should be EXCLUDED
        Finding.objects.create(
            tenant_id=tenant.id,
            scan=scan,
            uid="finding-2",
            check_id="check_1",
            status=StatusChoices.FAIL,
            severity=Severity.high,
            impact=Severity.high,
            check_metadata={},
            raw_result={},
        )

        result = _aggregate_requirement_statistics_from_database(
            str(tenant.id), str(scan.id)
        )

        assert result == {}
        assert result["check_1"]["passed"] == 1
        assert result["check_1"]["total"] == 1

    def test_multiple_resources_no_double_count(self, tenants_fixture, scans_fixture):
        """Verify a finding with multiple resources is only counted once."""

@@ -558,9 +558,9 @@ neo4j:
  # Neo4j Configuration (yaml format)
  config:
    dbms_security_procedures_allowlist: "apoc.*"
    dbms_security_procedures_unrestricted: ""
    dbms_security_procedures_unrestricted: "apoc.*"

  apoc_config:
    apoc.export.file.enabled: "false"
    apoc.import.file.enabled: "false"
    apoc.export.file.enabled: "true"
    apoc.import.file.enabled: "true"
    apoc.import.file.use_neo4j_config: "true"

@@ -1,20 +0,0 @@
|
||||
import warnings
|
||||
|
||||
from dashboard.common_methods import get_section_containers_rbi
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
|
||||
def get_table(data):
|
||||
aux = data[
|
||||
[
|
||||
"REQUIREMENTS_ID",
|
||||
"REQUIREMENTS_DESCRIPTION",
|
||||
"CHECKID",
|
||||
"STATUS",
|
||||
"REGION",
|
||||
"ACCOUNTID",
|
||||
"RESOURCEID",
|
||||
]
|
||||
]
|
||||
return get_section_containers_rbi(aux, "REQUIREMENTS_ID")
|
||||
@@ -1,20 +0,0 @@
|
||||
import warnings
|
||||
|
||||
from dashboard.common_methods import get_section_containers_rbi
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
|
||||
def get_table(data):
|
||||
aux = data[
|
||||
[
|
||||
"REQUIREMENTS_ID",
|
||||
"REQUIREMENTS_DESCRIPTION",
|
||||
"CHECKID",
|
||||
"STATUS",
|
||||
"REGION",
|
||||
"ACCOUNTID",
|
||||
"RESOURCEID",
|
||||
]
|
||||
]
|
||||
return get_section_containers_rbi(aux, "REQUIREMENTS_ID")
|
||||
@@ -1,24 +0,0 @@
|
||||
import warnings
|
||||
|
||||
from dashboard.common_methods import get_section_containers_format3
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
|
||||
def get_table(data):
|
||||
aux = data[
|
||||
[
|
||||
"REQUIREMENTS_ID",
|
||||
"REQUIREMENTS_DESCRIPTION",
|
||||
"REQUIREMENTS_ATTRIBUTES_SECTION",
|
||||
"CHECKID",
|
||||
"STATUS",
|
||||
"REGION",
|
||||
"ACCOUNTID",
|
||||
"RESOURCEID",
|
||||
]
|
||||
].copy()
|
||||
|
||||
return get_section_containers_format3(
|
||||
aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
|
||||
)
|
||||
@@ -1,24 +0,0 @@
|
||||
import warnings
|
||||
|
||||
from dashboard.common_methods import get_section_containers_format3
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
|
||||
def get_table(data):
|
||||
aux = data[
|
||||
[
|
||||
"REQUIREMENTS_ID",
|
||||
"REQUIREMENTS_DESCRIPTION",
|
||||
"REQUIREMENTS_ATTRIBUTES_SECTION",
|
||||
"CHECKID",
|
||||
"STATUS",
|
||||
"REGION",
|
||||
"ACCOUNTID",
|
||||
"RESOURCEID",
|
||||
]
|
||||
].copy()
|
||||
|
||||
return get_section_containers_format3(
|
||||
aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
|
||||
)
|
||||
@@ -1,24 +0,0 @@
import warnings

from dashboard.common_methods import get_section_containers_format3

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_format3(
        aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
    )
@@ -1,24 +0,0 @@
import warnings

from dashboard.common_methods import get_section_containers_format3

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_format3(
        aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
    )
@@ -80,8 +80,6 @@ def load_csv_files(csv_files):
            result = result.replace("_M65", " - M65")
        if "ALIBABACLOUD" in result:
            result = result.replace("_ALIBABACLOUD", " - ALIBABACLOUD")
        if "ORACLECLOUD" in result:
            result = result.replace("_ORACLECLOUD", " - ORACLECLOUD")
        results.append(result)

    unique_results = set(results)
@@ -211,7 +211,7 @@ Also is important to keep all code examples as short as possible, including the
| email-security | Ensures detection and protection against phishing, spam, spoofing, etc. |
| forensics-ready | Ensures systems are instrumented to support post-incident investigations. Any digital trace or evidence (logs, volume snapshots, memory dumps, network captures, etc.) is preserved immutably and accompanied by integrity guarantees so that it can be used in a forensic analysis |
| software-supply-chain | Detects or prevents tampering, unauthorized packages, or third-party risks in the software supply chain |
| e3 | M365 and Azure Entra checks enabled by or dependent on an E3 license (e.g., baseline security policies, conditional access) |
| e5 | M365 and Azure Entra checks enabled by or dependent on an E5 license (e.g., advanced threat protection, audit, DLP, and eDiscovery) |
| e3 | M365-specific controls enabled by or dependent on an E3 license (e.g., baseline security policies, conditional access) |
| e5 | M365-specific controls enabled by or dependent on an E5 license (e.g., advanced threat protection, audit, DLP, and eDiscovery) |
| privilege-escalation | Detects IAM policies or permissions that allow identities to elevate their privileges beyond their intended scope, potentially gaining administrator or higher-level access through specific action combinations |
| ec2-imdsv1 | Identifies EC2 instances using Instance Metadata Service version 1 (IMDSv1), which is vulnerable to SSRF attacks and should be replaced with IMDSv2 for enhanced security |
@@ -304,13 +304,6 @@
            "pages": [
              "user-guide/compliance/tutorials/threatscore"
            ]
          },
          {
            "group": "Cookbooks",
            "pages": [
              "user-guide/cookbooks/kubernetes-in-cluster",
              "user-guide/cookbooks/cicd-pipeline"
            ]
          }
        ]
      },
@@ -121,8 +121,8 @@ To update the environment file:
Edit the `.env` file and change the version values:

```env
PROWLER_UI_VERSION="5.21.0"
PROWLER_API_VERSION="5.21.0"
PROWLER_UI_VERSION="5.19.0"
PROWLER_API_VERSION="5.19.0"
```
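
After saving the file, a typical follow-up, assuming a Docker Compose deployment whose `docker-compose.yml` reads these variables, is to pull the pinned images and recreate the containers:

```console
docker compose pull
docker compose up -d
```
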
<Note>
Binary file not shown.
Before Width: | Height: | Size: 420 KiB
Binary file not shown.
Before Width: | Height: | Size: 486 KiB
Binary file not shown.
Before Width: | Height: | Size: 420 KiB
Binary file not shown.
Before Width: | Height: | Size: 323 KiB
Binary file not shown.
Before Width: | Height: | Size: 419 KiB
Binary file not shown.
Before Width: | Height: | Size: 490 KiB
@@ -1,243 +0,0 @@
---
title: 'Run Prowler in CI/CD and Send Findings to Prowler Cloud'
---

This cookbook demonstrates how to integrate Prowler into CI/CD pipelines so that security scans run automatically and findings are sent to Prowler Cloud via [Import Findings](/user-guide/tutorials/prowler-app-import-findings). Examples cover GitHub Actions and GitLab CI.

## Prerequisites

* A **Prowler Cloud** account with an active subscription (see [Prowler Cloud Pricing](https://prowler.com/pricing))
* A Prowler Cloud **API key** with the **Manage Ingestions** permission (see [API Keys](/user-guide/tutorials/prowler-app-api-keys))
* Cloud provider credentials configured in the CI/CD environment (e.g., AWS credentials for scanning AWS accounts)
* Access to configure pipeline workflows and secrets in the CI/CD platform

## Key Concepts

Prowler CLI provides the `--push-to-cloud` flag, which uploads scan results directly to Prowler Cloud after a scan completes. Combined with the `PROWLER_CLOUD_API_KEY` environment variable, this enables fully automated ingestion without manual file uploads.
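
A minimal invocation, assuming the API key has already been created in Prowler Cloud:

```console
export PROWLER_CLOUD_API_KEY="pk_your_api_key_here"
prowler aws --push-to-cloud
```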

For full details on the flag and API, refer to the [Import Findings](/user-guide/tutorials/prowler-app-import-findings) documentation.

<Note>
The examples in this guide use AWS as the target provider, but the same approach applies to any provider supported by Prowler (Azure, GCP, Kubernetes, and others). Replace `prowler aws` with the desired provider command (e.g., `prowler gcp`, `prowler azure`) and configure the corresponding credentials in the CI/CD environment.
</Note>

## GitHub Actions

### Store Secrets

Before creating the workflow, add the following secrets to the repository (under "Settings" > "Secrets and variables" > "Actions"):

* `PROWLER_CLOUD_API_KEY` — the Prowler Cloud API key
* Cloud provider credentials (e.g., `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`, or configure OIDC-based role assumption)

### Workflow: Scheduled AWS Scan

This workflow runs Prowler against an AWS account on a daily schedule and on every push to the `main` branch:

```yaml
name: Prowler Security Scan

on:
  schedule:
    - cron: "0 3 * * *" # Daily at 03:00 UTC
  push:
    branches: [main]
  workflow_dispatch: # Allow manual triggers

permissions:
  id-token: write # Required for OIDC
  contents: read

jobs:
  prowler-scan:
    runs-on: ubuntu-latest
    steps:
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::123456789012:role/ProwlerScanRole
          aws-region: us-east-1

      - name: Install Prowler
        run: pip install prowler

      - name: Run Prowler Scan
        env:
          PROWLER_CLOUD_API_KEY: ${{ secrets.PROWLER_CLOUD_API_KEY }}
        run: |
          prowler aws --push-to-cloud
```

<Note>
Replace `123456789012` with the actual AWS account ID and `ProwlerScanRole` with the IAM role name. For IAM role setup, refer to the [AWS authentication guide](/user-guide/providers/aws/authentication).
</Note>

### Workflow: Scan Specific Services on Pull Request

To run targeted scans on pull requests without blocking the merge pipeline, use `continue-on-error`:

```yaml
name: Prowler PR Check

on:
  pull_request:
    branches: [main]

jobs:
  prowler-scan:
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::123456789012:role/ProwlerScanRole
          aws-region: us-east-1

      - name: Install Prowler
        run: pip install prowler

      - name: Run Prowler Scan
        env:
          PROWLER_CLOUD_API_KEY: ${{ secrets.PROWLER_CLOUD_API_KEY }}
        run: |
          prowler aws --services s3,iam,ec2 --push-to-cloud
```

<Tip>
Limiting the scan to specific services with `--services` reduces execution time, making it practical for pull request checks.
</Tip>

## GitLab CI

### Store Variables

Add the following CI/CD variables in the GitLab project (under "Settings" > "CI/CD" > "Variables"):

* `PROWLER_CLOUD_API_KEY` — mark as **masked** and **protected**
* Cloud provider credentials as needed

### Pipeline: Scheduled AWS Scan

Add the following to `.gitlab-ci.yml`:

```yaml
prowler-scan:
  image: python:3.12-slim
  stage: test
  script:
    - pip install prowler
    - prowler aws --push-to-cloud
  variables:
    PROWLER_CLOUD_API_KEY: $PROWLER_CLOUD_API_KEY
    AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID
    AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY
    AWS_DEFAULT_REGION: "us-east-1"
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
      when: manual
```

To run the scan on a schedule, create a **Pipeline Schedule** in GitLab (under "Build" > "Pipeline Schedules") with the desired cron expression.

### Pipeline: Multi-Provider Scan

To scan multiple cloud providers in parallel:

```yaml
stages:
  - security

.prowler-base:
  image: python:3.12-slim
  stage: security
  before_script:
    - pip install prowler
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"

prowler-aws:
  extends: .prowler-base
  script:
    - prowler aws --push-to-cloud
  variables:
    PROWLER_CLOUD_API_KEY: $PROWLER_CLOUD_API_KEY
    AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID
    AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY

prowler-gcp:
  extends: .prowler-base
  script:
    - prowler gcp --push-to-cloud
  variables:
    PROWLER_CLOUD_API_KEY: $PROWLER_CLOUD_API_KEY
    GOOGLE_APPLICATION_CREDENTIALS: $GCP_SERVICE_ACCOUNT_KEY
```

## Tips and Best Practices

### When to Run Scans

* **Scheduled scans** (daily or weekly) provide continuous monitoring and are ideal for baseline security assessments
* **On-merge scans** catch configuration changes introduced by new code
* **Pull request scans** provide early feedback but should target specific services to keep execution times reasonable

### Handling Scan Failures

By default, Prowler exits with a non-zero code when it finds failing checks, which causes the CI/CD job to fail. To prevent scan results from blocking the pipeline:

* **GitHub Actions**: Add `continue-on-error: true` to the job
* **GitLab CI**: Add `allow_failure: true` to the job

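Alternatively, assuming the installed Prowler version supports the flag, the scan itself can be told not to fail on findings:

```console
# --ignore-exit-code-3 (-z) keeps the exit code at 0 even when checks fail
prowler aws -z --push-to-cloud
```
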
<Note>
Ingestion failures (e.g., network issues reaching Prowler Cloud) do not affect the Prowler exit code. The scan completes normally and only a warning is emitted. See [Import Findings troubleshooting](/user-guide/tutorials/prowler-app-import-findings#troubleshooting) for details.
</Note>

### Caching Prowler Installation

For faster pipeline runs, cache the Prowler installation:

**GitHub Actions:**
```yaml
- name: Cache pip packages
  uses: actions/cache@v4
  with:
    path: ~/.cache/pip
    key: ${{ runner.os }}-pip-prowler
    restore-keys: ${{ runner.os }}-pip-

- name: Install Prowler
  run: pip install prowler
```

**GitLab CI:**
```yaml
prowler-scan:
  cache:
    paths:
      - .cache/pip
  variables:
    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
```

### Output Formats

To generate additional report formats alongside the cloud upload:

```bash
prowler aws --push-to-cloud -M csv,html -o /tmp/prowler-reports
```

This produces CSV and HTML files locally while also pushing OCSF findings to Prowler Cloud. The local files can be stored as CI/CD artifacts for archival purposes, as shown in the sketch below.
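
A minimal sketch of archiving those reports in GitHub Actions, assuming the output path used above:

```yaml
- name: Upload Prowler reports
  uses: actions/upload-artifact@v4
  with:
    name: prowler-reports
    path: /tmp/prowler-reports
```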

### Scanning Multiple AWS Accounts

To scan multiple accounts sequentially in a single job, use [role assumption](/user-guide/providers/aws/role-assumption):

```bash
prowler aws -R arn:aws:iam::111111111111:role/ProwlerScanRole --push-to-cloud
prowler aws -R arn:aws:iam::222222222222:role/ProwlerScanRole --push-to-cloud
```

Each scan run creates a separate ingestion job in Prowler Cloud.
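
When the account list grows, a short loop keeps the job readable. A sketch, assuming each account exposes the same hypothetical `ProwlerScanRole`:

```bash
# Hypothetical account IDs; each run creates its own ingestion job
for account in 111111111111 222222222222 333333333333; do
  prowler aws -R "arn:aws:iam::${account}:role/ProwlerScanRole" --push-to-cloud
done
```
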
@@ -1,207 +0,0 @@
---
title: 'Run Kubernetes In-Cluster and Send Findings to Prowler Cloud'
---

This cookbook walks through deploying Prowler inside a Kubernetes cluster on a recurring schedule and automatically sending findings to Prowler Cloud via [Import Findings](/user-guide/tutorials/prowler-app-import-findings). By the end, security scan results from the cluster appear in Prowler Cloud without any manual file uploads.

## Prerequisites

* A **Prowler Cloud** account with an active subscription (see [Prowler Cloud Pricing](https://prowler.com/pricing))
* A Prowler Cloud **API key** with the **Manage Ingestions** permission (see [API Keys](/user-guide/tutorials/prowler-app-api-keys))
* Access to a Kubernetes cluster with `kubectl` configured
* Permissions to create ServiceAccounts, Roles, RoleBindings, Secrets, and CronJobs in the cluster

## Step 1: Create the ServiceAccount and RBAC Resources

Prowler needs a ServiceAccount with read access to cluster resources. Apply the manifests from the [`kubernetes` directory](https://github.com/prowler-cloud/prowler/tree/master/kubernetes) of the Prowler repository:

```console
kubectl apply -f kubernetes/prowler-sa.yaml
kubectl apply -f kubernetes/prowler-role.yaml
kubectl apply -f kubernetes/prowler-rolebinding.yaml
```

This creates:

* A `prowler-sa` ServiceAccount in the `prowler-ns` namespace
* A ClusterRole with the read permissions Prowler requires
* A ClusterRoleBinding linking the ServiceAccount to the role

For more details on these resources, refer to [Getting Started with Kubernetes](/user-guide/providers/kubernetes/getting-started-k8s).

## Step 2: Store the Prowler Cloud API Key as a Secret

Create a Kubernetes Secret to hold the API key securely:

```console
kubectl create secret generic prowler-cloud-api-key \
  --from-literal=api-key=pk_your_api_key_here \
  --namespace prowler-ns
```

Replace `pk_your_api_key_here` with the actual API key from Prowler Cloud.

<Warning>
Avoid embedding the API key directly in the CronJob manifest. Using a Kubernetes Secret keeps credentials out of version control and pod specs.
</Warning>

## Step 3: Create the CronJob Manifest

The CronJob runs Prowler on a schedule, scanning the cluster and pushing findings to Prowler Cloud with the `--push-to-cloud` flag.

Create a file named `prowler-cronjob.yaml`:

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: prowler-k8s-scan
  namespace: prowler-ns
spec:
  schedule: "0 2 * * *" # Runs daily at 02:00 UTC
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      backoffLimit: 1
      template:
        metadata:
          labels:
            app: prowler
        spec:
          serviceAccountName: prowler-sa
          containers:
            - name: prowler
              image: prowlercloud/prowler:stable
              args:
                - "kubernetes"
                - "--push-to-cloud"
              env:
                - name: PROWLER_CLOUD_API_KEY
                  valueFrom:
                    secretKeyRef:
                      name: prowler-cloud-api-key
                      key: api-key
                - name: CLUSTER_NAME
                  value: "my-cluster"
              imagePullPolicy: Always
              volumeMounts:
                - name: var-lib-cni
                  mountPath: /var/lib/cni
                  readOnly: true
                - name: var-lib-etcd
                  mountPath: /var/lib/etcd
                  readOnly: true
                - name: var-lib-kubelet
                  mountPath: /var/lib/kubelet
                  readOnly: true
                - name: etc-kubernetes
                  mountPath: /etc/kubernetes
                  readOnly: true
          hostPID: true
          restartPolicy: Never
          volumes:
            - name: var-lib-cni
              hostPath:
                path: /var/lib/cni
            - name: var-lib-etcd
              hostPath:
                path: /var/lib/etcd
            - name: var-lib-kubelet
              hostPath:
                path: /var/lib/kubelet
            - name: etc-kubernetes
              hostPath:
                path: /etc/kubernetes
```

<Note>
Replace `my-cluster` with a meaningful name for the cluster. This value appears in Prowler Cloud reports and helps identify the source of findings. See the `--cluster-name` flag documentation in [Getting Started with Kubernetes](/user-guide/providers/kubernetes/getting-started-k8s) for more details.
</Note>

### Customizing the Schedule

The `schedule` field uses standard cron syntax. Common examples:

* `"0 2 * * *"` — daily at 02:00 UTC
* `"0 */6 * * *"` — every 6 hours
* `"0 2 * * 1"` — weekly on Mondays at 02:00 UTC

### Scanning Specific Namespaces

To limit the scan to specific namespaces, add the `--namespace` flag to the `args` array:

```yaml
args:
  - "kubernetes"
  - "--push-to-cloud"
  - "--namespace"
  - "production,staging"
```

## Step 4: Deploy and Verify

Apply the CronJob to the cluster:

```console
kubectl apply -f prowler-cronjob.yaml
```

To trigger an immediate test run without waiting for the schedule:

```console
kubectl create job prowler-test-run --from=cronjob/prowler-k8s-scan -n prowler-ns
```

Monitor the job execution:

```console
kubectl get pods -n prowler-ns -l app=prowler --watch
```

Check the logs to confirm findings were pushed successfully:

```console
kubectl logs -n prowler-ns -l app=prowler --tail=50
```

A successful upload produces output similar to:

```
Pushing findings to Prowler Cloud, please wait...

Findings successfully pushed to Prowler Cloud. Ingestion job: fa8bc8c5-4925-46a0-9fe0-f6575905e094
See more details here: https://cloud.prowler.com/scans
```

## Step 5: View Findings in Prowler Cloud

Once the job completes and findings are pushed:

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/)
2. Open the "Scans" section to verify the ingestion job status
3. Browse findings under the Kubernetes provider

For details on the ingestion workflow and status tracking, refer to the [Import Findings](/user-guide/tutorials/prowler-app-import-findings) documentation.

## Tips and Troubleshooting

* **Resource limits**: For large clusters, consider setting `resources.requests` and `resources.limits` on the container to prevent the scan from consuming excessive cluster resources, as in the sketch below.
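
  A minimal sketch of such a block on the CronJob container, with illustrative values:

  ```yaml
  resources:
    requests:
      cpu: "250m"      # illustrative starting point; tune per cluster size
      memory: "512Mi"
    limits:
      cpu: "1"
      memory: "2Gi"
  ```
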
* **Network policies**: Ensure the Prowler pod can reach `api.prowler.com` over HTTPS (port 443). Adjust NetworkPolicies or egress rules if needed; a sketch follows this item.
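
  A minimal egress sketch, assuming the cluster enforces NetworkPolicies; since a NetworkPolicy cannot match a DNS name such as `api.prowler.com`, this allows all HTTPS (and DNS) egress from the Prowler pod:

  ```yaml
  apiVersion: networking.k8s.io/v1
  kind: NetworkPolicy
  metadata:
    name: prowler-egress
    namespace: prowler-ns
  spec:
    podSelector:
      matchLabels:
        app: prowler
    policyTypes:
      - Egress
    egress:
      - ports:
          - protocol: TCP
            port: 443
      - ports: # allow DNS resolution
          - protocol: UDP
            port: 53
  ```
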
* **Job history**: Kubernetes retains completed and failed jobs by default. Set `successfulJobsHistoryLimit` and `failedJobsHistoryLimit` in the CronJob spec to control cleanup:

  ```yaml
  spec:
    successfulJobsHistoryLimit: 3
    failedJobsHistoryLimit: 1
  ```

* **API key rotation**: When rotating the API key, update the Secret and restart any running jobs:

  ```console
  kubectl delete secret prowler-cloud-api-key -n prowler-ns
  kubectl create secret generic prowler-cloud-api-key \
    --from-literal=api-key=pk_new_api_key_here \
    --namespace prowler-ns
  ```

* **Failed uploads**: If the push to Prowler Cloud fails, the scan still completes and findings are saved locally in the container. Check the [Import Findings troubleshooting section](/user-guide/tutorials/prowler-app-import-findings#troubleshooting) for common error messages.
@@ -2,13 +2,9 @@
title: 'Google Workspace Authentication in Prowler'
---

import { VersionBadge } from "/snippets/version-badge.mdx"

<VersionBadge version="5.19.0" />

Prowler for Google Workspace uses a **Service Account with Domain-Wide Delegation** to authenticate to the Google Workspace Admin SDK. This allows Prowler to read directory data on behalf of a super administrator without requiring an interactive login.

## Required Open Authorization (OAuth) Scopes
## Required OAuth Scopes

Prowler requests the following read-only OAuth 2.0 scopes from the Google Workspace Admin SDK:

@@ -24,16 +20,16 @@ The delegated user must be a **super administrator** in your Google Workspace or

## Setup Steps

### Step 1: Create a Google Cloud Platform (GCP) Project (if Needed)
### Step 1: Create a GCP Project (if needed)

If no GCP project exists, create one at [https://console.cloud.google.com](https://console.cloud.google.com).
If you don't have a GCP project, create one at [https://console.cloud.google.com](https://console.cloud.google.com).

The project is only used to host the Service Account — it does not need to have any Google Workspace data in it.

### Step 2: Enable the Admin SDK API

1. Navigate to the [Google Cloud Console](https://console.cloud.google.com)
2. Select the target project
1. Go to the [Google Cloud Console](https://console.cloud.google.com)
2. Select your project
3. Navigate to **APIs & Services → Library**
4. Search for **Admin SDK API**
5. Click **Enable**

@@ -52,8 +48,8 @@ The Service Account does not need any GCP IAM roles. Its access to Google Worksp

### Step 4: Generate a JSON Key

1. Click the newly created Service Account
2. Navigate to the **Keys** tab
1. Click on the Service Account you just created
2. Go to the **Keys** tab
3. Click **Add Key → Create new key**
4. Select **JSON** format
5. Click **Create** — the key file will download automatically

@@ -65,7 +61,7 @@ This JSON key grants access to your Google Workspace organization. Never commit

### Step 5: Configure Domain-Wide Delegation in Google Workspace

1. Navigate to the [Google Workspace Admin Console](https://admin.google.com)
1. Go to the [Google Workspace Admin Console](https://admin.google.com)
2. Navigate to **Security → Access and data control → API controls**
3. Click **Manage Domain Wide Delegation**
4. Click **Add new**

@@ -82,26 +78,23 @@ https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.google
Domain-Wide Delegation must be configured by a Google Workspace **super administrator**. It may take a few minutes to propagate after saving.
</Note>

### Step 6: Provide Credentials to Prowler
### Step 6: Store Credentials Securely

- **Prowler Cloud:** Paste the Service Account JSON content and enter the delegated user email in the credentials form when configuring the Google Workspace provider.
- **Prowler CLI:** Export the credentials as environment variables:
Set your credentials as environment variables:

```console
```bash
export GOOGLEWORKSPACE_CREDENTIALS_FILE="/path/to/googleworkspace-sa.json"
export GOOGLEWORKSPACE_DELEGATED_USER="admin@yourdomain.com"
prowler googleworkspace
```

Alternatively, to pass credentials as a string (e.g., in CI/CD pipelines):
Alternatively, if you need to pass credentials as a string (e.g., in CI/CD pipelines):

```console
```bash
export GOOGLEWORKSPACE_CREDENTIALS_CONTENT=$(cat /path/to/googleworkspace-sa.json)
export GOOGLEWORKSPACE_DELEGATED_USER="admin@yourdomain.com"
prowler googleworkspace
```

## How Prowler Resolves Credentials
## Credential Lookup Order

Prowler resolves credentials in the following order:

@@ -154,7 +147,7 @@ The Service Account cannot impersonate the delegated user. This usually means Do
- All three required OAuth scopes are included
- The delegated user is a super administrator

### Permission Denied on Admin SDK Calls
### Permission Denied on Admin SDK calls

If Prowler connects but returns empty results or permission errors for specific API calls:

@@ -1,131 +1,100 @@
---
title: 'Getting Started With Google Workspace on Prowler'
title: 'Getting Started with Google Workspace'
---

import { VersionBadge } from "/snippets/version-badge.mdx"

Prowler for Google Workspace audits the organization's Google Workspace environment for security misconfigurations, including super administrator account hygiene, domain settings, and more.

## Prerequisites

Set up authentication for Google Workspace with the [Google Workspace Authentication](/user-guide/providers/googleworkspace/authentication) guide before starting either path:

- **Service Account:** Create a Service Account in a GCP project with Domain-Wide Delegation enabled.
- **OAuth Scopes:** Authorize the required read-only OAuth scopes in the Google Workspace Admin Console.
- **Customer ID:** Identify the Google Workspace Customer ID to use as the provider identifier.
- **Delegated User:** Have the email of a super administrator to use as the delegated user.

<CardGroup cols={2}>
  <Card title="Prowler Cloud" icon="cloud" href="#prowler-cloud">
    Onboard Google Workspace using Prowler Cloud
  </Card>
  <Card title="Prowler CLI" icon="terminal" href="#prowler-cli">
    Onboard Google Workspace using Prowler CLI
  </Card>
</CardGroup>

## Prowler Cloud

<VersionBadge version="5.21.0" />

### Step 1: Locate the Customer ID

1. Log into the [Google Workspace Admin Console](https://admin.google.com).
2. Navigate to "Account" > "Account Settings".
3. Find the **Customer ID** on the Account Settings page.

<Note>
The Customer ID starts with the letter "C" followed by alphanumeric characters (e.g., `C0xxxxxxx`). This value acts as the unique identifier for the Google Workspace account in Prowler Cloud.
</Note>

### Step 2: Open Prowler Cloud

1. Go to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app).
2. Navigate to "Configuration" > "Cloud Providers".

3. Click "Add Cloud Provider".

4. Select "Google Workspace".

### Step 3: Provide Credentials

1. Enter the **Customer ID** and an optional alias, then click "Next".

2. Paste the **Service Account JSON** credentials content.
3. Enter the "Delegated User Email" (a super administrator in the Google Workspace organization).

<Note>
The Service Account JSON is the full content of the key file downloaded when creating the Service Account. Paste the entire JSON object, not just the file path. For setup instructions, see the [Authentication guide](/user-guide/providers/googleworkspace/authentication).
</Note>

### Step 4: Check Connection

1. Click "Check Connection" to verify that the credentials and Domain-Wide Delegation are configured correctly.
2. Prowler will test the Service Account impersonation and Admin SDK access.

<Note>
If the connection test fails, verify that Domain-Wide Delegation is properly configured and that all three OAuth scopes are authorized. It may take a few minutes for delegation changes to propagate. See the [Troubleshooting](/user-guide/providers/googleworkspace/authentication#troubleshooting) section for common errors.
</Note>

### Step 5: Launch the Scan

1. Review the summary.
2. Click "Launch Scan" to start auditing Google Workspace.

---

## Prowler CLI
import { VersionBadge } from "/snippets/version-badge.mdx";

<VersionBadge version="5.19.0" />

Prowler for Google Workspace allows you to audit your organization's Google Workspace environment for security misconfigurations, including super administrator account hygiene, domain settings, and more.

## Prerequisites

Before running Prowler with the Google Workspace provider, ensure you have:

1. A Google Workspace account with super administrator privileges
2. A Google Cloud Platform (GCP) project to host the Service Account
3. Authentication configured (see [Authentication](/user-guide/providers/googleworkspace/authentication)):
   - A **Service Account JSON key** from a GCP project with Domain-Wide Delegation enabled

## Quick Start

### Step 1: Set Up Authentication

Set your Service Account credentials and delegated user email following the [Google Workspace Authentication](/user-guide/providers/googleworkspace/authentication) guide:
Set your Service Account credentials file path and delegated user email as environment variables:

```console
```bash
export GOOGLEWORKSPACE_CREDENTIALS_FILE="/path/to/service-account-key.json"
export GOOGLEWORKSPACE_DELEGATED_USER="admin@yourdomain.com"
```

Alternatively, pass the credentials content directly as a JSON string:
### Step 2: Run Prowler

```console
export GOOGLEWORKSPACE_CREDENTIALS_CONTENT='{"type": "service_account", ...}'
export GOOGLEWORKSPACE_DELEGATED_USER="admin@yourdomain.com"
```

### Step 2: Run the First Scan

Run a baseline scan after credentials are configured:

```console
```bash
prowler googleworkspace
```

Prowler authenticates as the delegated user and runs all available security checks against the Google Workspace organization.
Prowler will authenticate as the delegated user and run all available security checks against your Google Workspace organization.

### Step 3: Use a Custom Configuration (Optional)
## Authentication

Prowler uses a **Service Account with Domain-Wide Delegation** to authenticate to Google Workspace. This requires:

- A Service Account created in a GCP project
- The Admin SDK API enabled in that project
- Domain-Wide Delegation configured in the Google Workspace Admin Console
- A super admin user email to impersonate

### Using Environment Variables (Recommended)

```bash
export GOOGLEWORKSPACE_CREDENTIALS_FILE="/path/to/service-account-key.json"
export GOOGLEWORKSPACE_DELEGATED_USER="admin@yourdomain.com"
prowler googleworkspace
```

Alternatively, pass the credentials content directly as a JSON string:

```bash
export GOOGLEWORKSPACE_CREDENTIALS_CONTENT='{"type": "service_account", ...}'
export GOOGLEWORKSPACE_DELEGATED_USER="admin@yourdomain.com"
prowler googleworkspace
```

<Note>
The delegated user must be a super admin email in your Google Workspace organization. The service account credentials must be provided via environment variables (`GOOGLEWORKSPACE_CREDENTIALS_FILE` or `GOOGLEWORKSPACE_CREDENTIALS_CONTENT`).
</Note>

## Understanding the Output

When Prowler runs successfully, it will display the credentials being used:

```
Using the Google Workspace credentials below:
┌─────────────────────────────────────────────────────────┐
│ Google Workspace Domain: yourdomain.com                 │
│ Customer ID: C0xxxxxxx                                  │
│ Delegated User: admin@yourdomain.com                    │
│ Authentication Method: Service Account with Domain-Wide │
│ Delegation                                              │
└─────────────────────────────────────────────────────────┘
```

Findings are reported per check. For example, the `directory_super_admin_count` check verifies the number of super administrators is within a recommended range (2–4):

- **PASS** — 2 to 4 super administrators found
- **FAIL** — 0 or 1 (single point of failure) or 5+ (excessive privilege exposure)

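To re-run only that check while adjusting the number of administrators, the per-check selector can be used; a sketch, assuming the `--check` flag works for this provider as it does for the others:

```console
prowler googleworkspace --check directory_super_admin_count
```
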
Output files are saved in the configured output directory (default: `output/`) in CSV, JSON-OCSF, and HTML formats.

## Configuration

Prowler uses a configuration file to customize provider behavior. To use a custom configuration:

```console
```bash
prowler googleworkspace --config-file /path/to/config.yaml
```

---
## Next Steps

- [Authentication](/user-guide/providers/googleworkspace/authentication) — Detailed guide on setting up a Service Account and Domain-Wide Delegation

@@ -164,7 +164,3 @@ env:
```

</Tip>

<Tip>
To set up a production-ready CronJob that runs Prowler on a schedule and sends findings to Prowler Cloud, see the [Run Kubernetes In-Cluster and Send Findings to Prowler Cloud](/user-guide/cookbooks/kubernetes-in-cluster) cookbook.
</Tip>

@@ -202,135 +202,15 @@ To expand the graph for detailed exploration, click the fullscreen icon in the g
  width="700"
/>

## Using Attack Paths with the MCP Server and Lighthouse AI
## Using Attack Paths with the MCP Server

Attack Paths capabilities are also available through the [Prowler MCP Server](/getting-started/products/prowler-mcp), enabling interaction with Attack Paths data via AI assistants like Claude Desktop, Cursor, and other MCP clients.

[Prowler Lighthouse AI](/getting-started/products/prowler-lighthouse-ai) also supports Attack Paths queries, allowing you to analyze privilege escalation chains and security misconfigurations directly from the chat interface.

The following MCP tools are available for Attack Paths:

- **`prowler_app_list_attack_paths_scans`** - List and filter Attack Paths scans
- **`prowler_app_list_attack_paths_queries`** - Discover available queries for a completed scan
- **`prowler_app_run_attack_paths_query`** - Execute a query and retrieve graph results with nodes and relationships
- **`prowler_app_get_attack_paths_cartography_schema`** - Retrieve the Cartography graph schema for custom openCypher queries
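
With the schema retrieved, a custom query is plain openCypher. A hypothetical sketch, assuming the Cartography labels and the `exposed_internet` flag Cartography sets on EC2 nodes:

```cypher
// Hypothetical custom query: internet-exposed EC2 instances per account
MATCH (a:AWSAccount)-[:RESOURCE]->(i:EC2Instance {exposed_internet: true})
RETURN a.id AS account, i.instanceid AS instance, i.publicipaddress AS public_ip
```
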
### Example Questions

Ask through the MCP Server or Lighthouse AI:

- "Find EC2 instances exposed to the internet with access to sensitive S3 buckets"
- "Are there any IAM roles that can escalate their own privileges?"
- "Show me all internet-facing resources with open security groups"
- "Which principals can create Lambda functions with privileged roles?"
- "List all RDS instances with storage encryption disabled"
- "Find S3 buckets that allow anonymous access"
- "Are there any CloudFormation stacks that could be hijacked for privilege escalation?"
- "Show me all roles that can be assumed for lateral movement"

### Supported Queries

Attack Paths currently supports the following built-in queries for AWS:

#### Custom Attack Path Queries

| Query | Description |
|---|---|
| **Internet-Exposed EC2 with Sensitive S3 Access** | Find SSH-exposed EC2 instances that can assume roles to read tagged sensitive S3 buckets |

#### Basic Resource Queries

| Query | Description |
|---|---|
| **RDS Instances Inventory** | List all provisioned RDS database instances in the account |
| **Unencrypted RDS Instances** | Find RDS instances with storage encryption disabled |
| **S3 Buckets with Anonymous Access** | Find S3 buckets that allow anonymous access |
| **IAM Statements Allowing All Actions** | Find IAM policy statements that allow all actions via wildcard (\*) |
| **IAM Statements Allowing Policy Deletion** | Find IAM policy statements that allow iam:DeletePolicy |
| **IAM Statements Allowing Create Actions** | Find IAM policy statements that allow any create action |

#### Network Exposure Queries

| Query | Description |
|---|---|
| **Internet-Exposed EC2 Instances** | Find EC2 instances flagged as exposed to the internet |
| **Open Security Groups on Internet-Facing Resources** | Find internet-facing resources with security groups allowing inbound from 0.0.0.0/0 |
| **Internet-Exposed Classic Load Balancers** | Find Classic Load Balancers exposed to the internet with their listeners |
| **Internet-Exposed ALB/NLB Load Balancers** | Find ELBv2 (ALB/NLB) load balancers exposed to the internet with their listeners |
| **Resource Lookup by Public IP** | Find the AWS resource associated with a given public IP address |

#### Privilege Escalation Queries

These queries are based on research from [pathfinding.cloud](https://pathfinding.cloud) by Datadog.

| Query | Description |
|---|---|
| **App Runner Service Creation with Privileged Role (APPRUNNER-001)** | Create an App Runner service with a privileged IAM role to gain its permissions |
| **App Runner Service Update for Role Access (APPRUNNER-002)** | Update an existing App Runner service to leverage its already-attached privileged role |
| **Bedrock Code Interpreter with Privileged Role (BEDROCK-001)** | Create a Bedrock AgentCore Code Interpreter with a privileged role attached |
| **Bedrock Code Interpreter Session Hijacking (BEDROCK-002)** | Start a session on an existing Bedrock code interpreter to exfiltrate its privileged role credentials |
| **CloudFormation Stack Creation with Privileged Role (CLOUDFORMATION-001)** | Create a CloudFormation stack with a privileged role to provision arbitrary AWS resources |
| **CloudFormation Stack Update for Role Access (CLOUDFORMATION-002)** | Update an existing CloudFormation stack to leverage its already-attached privileged service role |
| **CloudFormation StackSet Creation with Privileged Role (CLOUDFORMATION-003)** | Create a CloudFormation StackSet with a privileged execution role to provision arbitrary resources across accounts |
| **CloudFormation StackSet Update with Privileged Role (CLOUDFORMATION-004)** | Update an existing CloudFormation StackSet to inject malicious resources using a privileged execution role |
| **CloudFormation Change Set Privilege Escalation (CLOUDFORMATION-005)** | Create and execute a change set on an existing stack to leverage its privileged service role |
| **CodeBuild Project Creation with Privileged Role (CODEBUILD-001)** | Create a CodeBuild project with a privileged role to execute arbitrary code via a malicious buildspec |
| **CodeBuild Buildspec Override for Role Access (CODEBUILD-002)** | Start a build on an existing CodeBuild project with a buildspec override to execute code with its privileged role |
| **CodeBuild Batch Buildspec Override for Role Access (CODEBUILD-003)** | Start a batch build on an existing CodeBuild project with a buildspec override to execute code with its privileged role |
| **CodeBuild Batch Project Creation with Privileged Role (CODEBUILD-004)** | Create a CodeBuild project configured for batch builds with a privileged role to execute arbitrary code via a malicious buildspec |
| **Data Pipeline Creation with Privileged Role (DATAPIPELINE-001)** | Create a Data Pipeline with a privileged role to execute arbitrary commands on provisioned infrastructure |
| **EC2 Instance Launch with Privileged Role (EC2-001)** | Launch EC2 instances with privileged IAM roles to gain their permissions via IMDS |
| **EC2 Role Hijacking via UserData Injection (EC2-002)** | Inject malicious scripts into EC2 instance userData to gain the attached role's permissions |
| **Spot Instance Launch with Privileged Role (EC2-003)** | Launch EC2 Spot Instances with privileged IAM roles to gain their permissions via IMDS |
| **Launch Template Poisoning for Role Access (EC2-004)** | Inject malicious userData into launch templates that reference privileged roles, no PassRole needed |
| **EC2 Instance Connect SSH Access for Role Credentials (EC2INSTANCECONNECT-003)** | Push a temporary SSH key to an EC2 instance via Instance Connect to access its attached role credentials through IMDS |
| **ECS Service Creation with Privileged Role (ECS-001 - New Cluster)** | Create an ECS cluster and service with a privileged Fargate task role to execute arbitrary code |
| **ECS Task Execution with Privileged Role (ECS-002 - New Cluster)** | Create an ECS cluster and run a one-off Fargate task with a privileged role to execute arbitrary code |
| **ECS Service Creation with Privileged Role (ECS-003 - Existing Cluster)** | Deploy a Fargate service with a privileged role on an existing ECS cluster |
| **ECS Task Execution with Privileged Role (ECS-004 - Existing Cluster)** | Run a one-off Fargate task with a privileged role on an existing ECS cluster |
| **ECS Task Start with Privileged Role on EC2 (ECS-005 - Existing Cluster)** | Register a task definition with a privileged role and start it on an EC2 container instance to execute arbitrary code |
| **ECS Exec Container Hijacking for Role Credentials (ECS-006)** | Shell into a running ECS container via ECS Exec to steal the attached task role's credentials |
| **Glue Dev Endpoint with Privileged Role (GLUE-001)** | Create a Glue development endpoint with a privileged role attached to gain its permissions |
| **Glue Dev Endpoint SSH Hijacking via Update (GLUE-002)** | Update an existing Glue development endpoint to inject an SSH public key and access its attached role credentials |
| **Glue Job Creation with Privileged Role (GLUE-003)** | Create a Glue job with a privileged role and start it to execute arbitrary code with that role's permissions |
| **Glue Job Creation with Scheduled Trigger and Privileged Role (GLUE-004)** | Create a Glue job with a privileged role and a scheduled trigger to persistently execute arbitrary code |
| **Glue Job Hijacking via Update with Privileged Role (GLUE-005)** | Update an existing Glue job to attach a privileged role and inject malicious code, then start it to gain that role's permissions |
| **Glue Job Hijacking with Scheduled Trigger and Privileged Role (GLUE-006)** | Update an existing Glue job to attach a privileged role and inject malicious code, then create a scheduled trigger for persistent automated execution |
| **Policy Version Override for Self-Escalation (IAM-001)** | Create a new version of an attached policy with administrative permissions, instantly escalating the principal's own privileges |
| **Access Key Creation for Lateral Movement (IAM-002)** | Create access keys for other IAM users to gain their permissions and move laterally across the account |
| **Access Key Rotation Attack for Lateral Movement (IAM-003)** | Delete and recreate access keys for other IAM users to bypass the two-key limit and gain their permissions |
| **Console Login Profile Creation for Lateral Movement (IAM-004)** | Create console login profiles for other IAM users to access the AWS Console with their permissions |
| **Inline Policy Injection for Self-Escalation (IAM-005)** | Attach an inline policy with administrative permissions to your own role, instantly escalating privileges |
| **Console Password Override for Lateral Movement (IAM-006)** | Change the console password of other IAM users to log in as them and gain their permissions |
| **Inline Policy Injection on User for Self-Escalation (IAM-007)** | Attach an inline policy with administrative permissions to your own IAM user, instantly escalating privileges |
| **Managed Policy Attachment on User for Self-Escalation (IAM-008)** | Attach existing managed policies with administrative permissions to your own IAM user, instantly escalating privileges |
| **Managed Policy Attachment on Role for Self-Escalation (IAM-009)** | Attach existing managed policies with administrative permissions to your own IAM role, instantly escalating privileges |
| **Managed Policy Attachment on Group for Self-Escalation (IAM-010)** | Attach existing managed policies with administrative permissions to a group you belong to, escalating privileges for all group members |
| **Inline Policy Injection on Group for Self-Escalation (IAM-011)** | Attach an inline policy with administrative permissions to a group you belong to, escalating privileges for all group members |
| **Trust Policy Hijacking for Role Assumption (IAM-012)** | Modify a role's trust policy to allow yourself to assume it, gaining the role's permissions |
| **Group Membership Hijacking for Privilege Escalation (IAM-013)** | Add yourself to a privileged IAM group to inherit its permissions, gaining access to all policies attached to the group |
| **Managed Policy Attachment with Role Assumption for Lateral Movement (IAM-014)** | Attach administrative managed policies to another role you can assume, then assume it to gain elevated privileges |
| **Managed Policy Attachment with Access Key Creation for Lateral Movement (IAM-015)** | Attach administrative managed policies to another IAM user and create access keys for them to gain programmatic access with elevated privileges |
| **Policy Version Override with Role Assumption for Lateral Movement (IAM-016)** | Create a new version of a customer-managed policy attached to another role with administrative permissions, then assume that role to gain elevated access |
| **Inline Policy Injection with Role Assumption for Lateral Movement (IAM-017)** | Attach an inline policy with administrative permissions to another role you can assume, then assume it to gain elevated privileges |
| **Inline Policy Injection with Access Key Creation for Lateral Movement (IAM-018)** | Attach an inline policy with administrative permissions to another IAM user and create access keys for them to gain programmatic access with elevated privileges |
| **Managed Policy Attachment with Trust Policy Hijacking for Privilege Escalation (IAM-019)** | Attach administrative managed policies to a role and modify its trust policy to allow yourself to assume it, gaining elevated privileges without prior assume-role access |
| **Policy Version Override with Trust Policy Hijacking for Privilege Escalation (IAM-020)** | Create a new version of a customer-managed policy attached to a role with administrative permissions and modify its trust policy to assume it, without prior assume-role access |
| **Inline Policy Injection with Trust Policy Hijacking for Privilege Escalation (IAM-021)** | Add an inline policy with administrative permissions to a role and modify its trust policy to allow yourself to assume it, gaining elevated privileges without prior assume-role access |
| **Lambda Function Creation with Privileged Role (LAMBDA-001)** | Create a Lambda function with a privileged IAM role and invoke it to execute code with that role's permissions |
| **Lambda Function Creation with Event Source Trigger (LAMBDA-002)** | Create a Lambda function with a privileged IAM role and an event source mapping to trigger it automatically, executing code with the role's permissions |
| **Lambda Function Code Injection (LAMBDA-003)** | Modify the code of an existing Lambda function to execute arbitrary commands with the function's execution role permissions |
| **Lambda Function Code Injection with Direct Invocation (LAMBDA-004)** | Modify the code of an existing Lambda function and invoke it directly to execute arbitrary commands with the function's execution role permissions |
| **Lambda Function Code Injection with Resource Policy Grant (LAMBDA-005)** | Modify the code of an existing Lambda function and grant yourself invocation permission via its resource-based policy to execute code with the function's execution role |
| **Lambda Function Creation with Resource Policy Invocation (LAMBDA-006)** | Create a Lambda function with a privileged IAM role and grant yourself invocation permission via its resource-based policy to execute code with the role's permissions |
| **SageMaker Notebook Creation with Privileged Role (SAGEMAKER-001)** | Create a SageMaker notebook instance with a privileged IAM role to execute arbitrary code with the role's permissions via the Jupyter environment |
| **SageMaker Training Job Creation with Privileged Role (SAGEMAKER-002)** | Create a SageMaker training job with a privileged IAM role to execute arbitrary container code with the role's permissions |
| **SageMaker Processing Job Creation with Privileged Role (SAGEMAKER-003)** | Create a SageMaker processing job with a privileged IAM role to execute arbitrary container code with the role's permissions |
| **SageMaker Presigned Notebook URL for Privilege Escalation (SAGEMAKER-004)** | Generate a presigned URL to access an existing SageMaker notebook instance and execute code with its execution role's permissions |
| **SageMaker Notebook Lifecycle Config Injection (SAGEMAKER-005)** | Inject a malicious lifecycle configuration into an existing SageMaker notebook to execute code with the notebook's execution role during startup |
| **SSM Session Access for EC2 Role Credentials (SSM-001)** | Start an SSM session on an EC2 instance to access its attached role credentials through IMDS |
| **SSM Send Command for EC2 Role Credentials (SSM-002)** | Execute commands on an EC2 instance via SSM Run Command to access its attached role credentials through IMDS |
| **Role Assumption for Privilege Escalation (STS-001)** | Assume IAM roles with elevated permissions by exploiting bidirectional trust between the starting principal and the target role |

These tools enable workflows such as:
- Asking an AI assistant to identify privilege escalation paths in a specific AWS account

@@ -2,12 +2,6 @@

All notable changes to the **Prowler MCP Server** are documented in this file.

## [0.5.0] (Prowler v5.21.0)

### 🚀 Added

- Attack Path tool to get Neo4j DB schema [(#10321)](https://github.com/prowler-cloud/prowler/pull/10321)

## [0.4.0] (Prowler v5.19.0)

### 🚀 Added

@@ -5,7 +5,7 @@ This package provides MCP tools for accessing:
- Prowler Hub: All security artifacts (detections, remediations and frameworks) supported by Prowler
"""

__version__ = "0.5.0"
__version__ = "0.4.0"
__author__ = "Prowler Team"
__email__ = "engineering@prowler.com"

@@ -118,51 +118,6 @@ class AttackPathScansListResponse(BaseModel):
    )


class AttackPathCartographySchema(MinimalSerializerMixin, BaseModel):
    """Cartography graph schema metadata for a completed attack paths scan.

    Contains the schema URL and provider info needed to fetch the full
    Cartography schema markdown for openCypher query generation.
    """

    model_config = ConfigDict(frozen=True)

    id: str = Field(description="Unique identifier for the schema resource")
    provider: str = Field(description="Cloud provider type (aws, azure, gcp, etc.)")
    cartography_version: str = Field(description="Version of the Cartography schema")
    schema_url: str = Field(description="URL to the Cartography schema page on GitHub")
    raw_schema_url: str = Field(
        description="Raw URL to fetch the Cartography schema markdown content"
    )
    schema_content: str | None = Field(
        default=None,
        description="Full Cartography schema markdown content (populated after fetch)",
    )

    @classmethod
    def from_api_response(
        cls, response: dict[str, Any]
    ) -> "AttackPathCartographySchema":
        """Transform JSON:API schema response to model.

        Args:
            response: Full API response with data and attributes

        Returns:
            AttackPathCartographySchema instance
        """
        data = response.get("data", {})
        attributes = data.get("attributes", {})

        return cls(
            id=data["id"],
            provider=attributes["provider"],
            cartography_version=attributes["cartography_version"],
            schema_url=attributes["schema_url"],
            raw_schema_url=attributes["raw_schema_url"],
        )


class AttackPathQueryParameter(MinimalSerializerMixin, BaseModel):
    """Parameter definition for an attack paths query.

@@ -8,7 +8,6 @@ through cloud infrastructure relationships.
from typing import Any, Literal

from prowler_mcp_server.prowler_app.models.attack_paths import (
    AttackPathCartographySchema,
    AttackPathQuery,
    AttackPathQueryResult,
    AttackPathScansListResponse,
@@ -226,53 +225,3 @@ class AttackPathsTools(BaseTool):
                f"Failed to run attack paths query '{query_id}' on scan {scan_id}: {e}"
            )
            return {"error": f"Failed to run attack paths query '{query_id}': {str(e)}"}

    async def get_attack_paths_cartography_schema(
        self,
        scan_id: str = Field(
            description="UUID of a COMPLETED attack paths scan. Use `prowler_app_list_attack_paths_scans` with state=['completed'] to find scan IDs"
        ),
    ) -> dict[str, Any]:
        """Retrieve the Cartography graph schema for a completed attack paths scan.

        This tool fetches the full Cartography schema (node labels, relationships,
        and properties) so the LLM can write accurate custom openCypher queries
        for attack paths analysis.

        Two-step flow:
        1. Calls the Prowler API to get schema metadata (provider, version, URLs)
        2. Fetches the raw Cartography schema markdown from GitHub

        Returns:
        - id: Schema resource identifier
        - provider: Cloud provider type
        - cartography_version: Schema version
        - schema_url: GitHub page URL for reference
        - raw_schema_url: Raw markdown URL
        - schema_content: Full Cartography schema markdown with node/relationship definitions

        Workflow:
        1. Use prowler_app_list_attack_paths_scans to find a completed scan
        2. Use this tool to get the schema for the scan's provider
        3. Use the schema to craft custom openCypher queries
        4. Execute queries with prowler_app_run_attack_paths_query
        """
        try:
            api_response = await self.api_client.get(
                f"/attack-paths-scans/{scan_id}/schema"
            )

            schema = AttackPathCartographySchema.from_api_response(api_response)

            schema_content = await self.api_client.fetch_external_url(
                schema.raw_schema_url
            )

            return schema.model_copy(
                update={"schema_content": schema_content}
            ).model_dump()
        except Exception as e:
            self.logger.error(
                f"Failed to get cartography schema for scan {scan_id}: {e}"
            )
            return {"error": f"Failed to get cartography schema: {str(e)}"}
Some files were not shown because too many files have changed in this diff.