Compare commits


5 Commits

Author SHA1 Message Date
Alan Buscaglia f34a025acc fix(ui): improve category label formatting for acronyms and versions 2025-12-16 18:09:18 +01:00
Alan Buscaglia d2886a5e10 refactor(ui): simplify categories.ts to only use dynamic label formatting
Remove hardcoded CATEGORY_IDS and CATEGORY_LABELS constants.
Categories now come from the API and are formatted dynamically.
2025-12-15 19:16:06 +01:00
Alan Buscaglia 1e1dfa29c0 chore(ui): apply ThreatScore wording from PR #9524 2025-12-15 19:13:29 +01:00
Alan Buscaglia 747e6c9f81 feat(ui): add 'All categories' option to risk radar selector 2025-12-15 17:50:01 +01:00
Alan Buscaglia dab231d626 feat(ui): add category selector for risk radar and findings filters
- Add CategorySelector component to risk radar view for filtering by category
- Add fade effect to non-selected radar chart points
- Add category filter to findings page using shared labelFormatter
- Extract category labels to shared lib/categories.ts
- Remove category from active filter badges (now handled by select)
2025-12-15 17:46:03 +01:00
297 changed files with 7303 additions and 18605 deletions
+1 -8
@@ -15,13 +15,6 @@ AUTH_SECRET="N/c6mnaS5+SWq81+819OrzQZlmx1Vxtp/orjttJSmw8="
# Google Tag Manager ID
NEXT_PUBLIC_GOOGLE_TAG_MANAGER_ID=""
#### MCP Server ####
PROWLER_MCP_VERSION=stable
# For UI and MCP running on docker:
PROWLER_MCP_SERVER_URL=http://mcp-server:8000/mcp
# For UI running on host, MCP in docker:
# PROWLER_MCP_SERVER_URL=http://localhost:8000/mcp
#### Code Review Configuration ####
# Enable Claude Code standards validation on pre-push hook
# Set to 'true' to validate changes against AGENTS.md standards via Claude Code
@@ -119,7 +112,7 @@ NEXT_PUBLIC_SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}
#### Prowler release version ####
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.16.0
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.12.2
# Social login credentials
SOCIAL_GOOGLE_OAUTH_CALLBACK_URL="${AUTH_URL}/api/auth/callback/google"
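For reference, the two PROWLER_MCP_SERVER_URL values in the removed block differ only in how the UI reaches the MCP server; a minimal sketch of switching between them, keeping the .env comment/uncomment convention (service name mcp-server and port 8000 come from the removed lines, not re-verified against the compose file):

# UI and MCP both running in docker: the compose service name resolves on the shared network
PROWLER_MCP_SERVER_URL=http://mcp-server:8000/mcp
# UI on the host, MCP in docker: switch to the port published on localhost
# PROWLER_MCP_SERVER_URL=http://localhost:8000/mcp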
-254
@@ -1,254 +0,0 @@
name: 'API: Bump Version'
on:
release:
types:
- 'published'
concurrency:
group: ${{ github.workflow }}-${{ github.event.release.tag_name }}
cancel-in-progress: false
env:
PROWLER_VERSION: ${{ github.event.release.tag_name }}
BASE_BRANCH: master
jobs:
detect-release-type:
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
contents: read
outputs:
is_minor: ${{ steps.detect.outputs.is_minor }}
is_patch: ${{ steps.detect.outputs.is_patch }}
major_version: ${{ steps.detect.outputs.major_version }}
minor_version: ${{ steps.detect.outputs.minor_version }}
patch_version: ${{ steps.detect.outputs.patch_version }}
current_api_version: ${{ steps.get_api_version.outputs.current_api_version }}
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Get current API version
id: get_api_version
run: |
CURRENT_API_VERSION=$(grep -oP '^version = "\K[^"]+' api/pyproject.toml)
echo "current_api_version=${CURRENT_API_VERSION}" >> "${GITHUB_OUTPUT}"
echo "Current API version: $CURRENT_API_VERSION"
- name: Detect release type and parse version
id: detect
run: |
if [[ $PROWLER_VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
MAJOR_VERSION=${BASH_REMATCH[1]}
MINOR_VERSION=${BASH_REMATCH[2]}
PATCH_VERSION=${BASH_REMATCH[3]}
echo "major_version=${MAJOR_VERSION}" >> "${GITHUB_OUTPUT}"
echo "minor_version=${MINOR_VERSION}" >> "${GITHUB_OUTPUT}"
echo "patch_version=${PATCH_VERSION}" >> "${GITHUB_OUTPUT}"
if (( MAJOR_VERSION != 5 )); then
echo "::error::Releasing another Prowler major version, aborting..."
exit 1
fi
if (( PATCH_VERSION == 0 )); then
echo "is_minor=true" >> "${GITHUB_OUTPUT}"
echo "is_patch=false" >> "${GITHUB_OUTPUT}"
echo "✓ Minor release detected: $PROWLER_VERSION"
else
echo "is_minor=false" >> "${GITHUB_OUTPUT}"
echo "is_patch=true" >> "${GITHUB_OUTPUT}"
echo "✓ Patch release detected: $PROWLER_VERSION"
fi
else
echo "::error::Invalid version syntax: '$PROWLER_VERSION' (must be X.Y.Z)"
exit 1
fi
bump-minor-version:
needs: detect-release-type
if: needs.detect-release-type.outputs.is_minor == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Calculate next API minor version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
CURRENT_API_VERSION="${{ needs.detect-release-type.outputs.current_api_version }}"
# API version follows Prowler minor + 1
# For Prowler 5.17.0 -> API 1.18.0
# For next master (Prowler 5.18.0) -> API 1.19.0
NEXT_API_VERSION=1.$((MINOR_VERSION + 2)).0
echo "CURRENT_API_VERSION=${CURRENT_API_VERSION}" >> "${GITHUB_ENV}"
echo "NEXT_API_VERSION=${NEXT_API_VERSION}" >> "${GITHUB_ENV}"
echo "Prowler release version: ${MAJOR_VERSION}.${MINOR_VERSION}.0"
echo "Current API version: $CURRENT_API_VERSION"
echo "Next API minor version (for master): $NEXT_API_VERSION"
- name: Bump API versions in files for master
run: |
set -e
sed -i "s|version = \"${CURRENT_API_VERSION}\"|version = \"${NEXT_API_VERSION}\"|" api/pyproject.toml
sed -i "s|spectacular_settings.VERSION = \"${CURRENT_API_VERSION}\"|spectacular_settings.VERSION = \"${NEXT_API_VERSION}\"|" api/src/backend/api/v1/views.py
sed -i "s| version: ${CURRENT_API_VERSION}| version: ${NEXT_API_VERSION}|" api/src/backend/api/specs/v1.yaml
echo "Files modified:"
git --no-pager diff
- name: Create PR for next API minor version to master
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: master
commit-message: 'chore(api): Bump version to v${{ env.NEXT_API_VERSION }}'
branch: api-version-bump-to-v${{ env.NEXT_API_VERSION }}
title: 'chore(api): Bump version to v${{ env.NEXT_API_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Bump Prowler API version to v${{ env.NEXT_API_VERSION }} after releasing Prowler v${{ env.PROWLER_VERSION }}.
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
- name: Checkout version branch
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}
- name: Calculate first API patch version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
CURRENT_API_VERSION="${{ needs.detect-release-type.outputs.current_api_version }}"
VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
# API version follows Prowler minor + 1
# For Prowler 5.17.0 release -> version branch v5.17 should have API 1.18.1
FIRST_API_PATCH_VERSION=1.$((MINOR_VERSION + 1)).1
echo "CURRENT_API_VERSION=${CURRENT_API_VERSION}" >> "${GITHUB_ENV}"
echo "FIRST_API_PATCH_VERSION=${FIRST_API_PATCH_VERSION}" >> "${GITHUB_ENV}"
echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"
echo "Prowler release version: ${MAJOR_VERSION}.${MINOR_VERSION}.0"
echo "First API patch version (for ${VERSION_BRANCH}): $FIRST_API_PATCH_VERSION"
echo "Version branch: $VERSION_BRANCH"
- name: Bump API versions in files for version branch
run: |
set -e
sed -i "s|version = \"${CURRENT_API_VERSION}\"|version = \"${FIRST_API_PATCH_VERSION}\"|" api/pyproject.toml
sed -i "s|spectacular_settings.VERSION = \"${CURRENT_API_VERSION}\"|spectacular_settings.VERSION = \"${FIRST_API_PATCH_VERSION}\"|" api/src/backend/api/v1/views.py
sed -i "s| version: ${CURRENT_API_VERSION}| version: ${FIRST_API_PATCH_VERSION}|" api/src/backend/api/specs/v1.yaml
echo "Files modified:"
git --no-pager diff
- name: Create PR for first API patch version to version branch
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: ${{ env.VERSION_BRANCH }}
commit-message: 'chore(api): Bump version to v${{ env.FIRST_API_PATCH_VERSION }}'
branch: api-version-bump-to-v${{ env.FIRST_API_PATCH_VERSION }}
title: 'chore(api): Bump version to v${{ env.FIRST_API_PATCH_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Bump Prowler API version to v${{ env.FIRST_API_PATCH_VERSION }} in version branch after releasing Prowler v${{ env.PROWLER_VERSION }}.
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
bump-patch-version:
needs: detect-release-type
if: needs.detect-release-type.outputs.is_patch == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Calculate next API patch version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
PATCH_VERSION=${{ needs.detect-release-type.outputs.patch_version }}
CURRENT_API_VERSION="${{ needs.detect-release-type.outputs.current_api_version }}"
VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
# Extract current API patch to increment it
if [[ $CURRENT_API_VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
API_PATCH=${BASH_REMATCH[3]}
# API version follows Prowler minor + 1
# Keep same API minor (based on Prowler minor), increment patch
NEXT_API_PATCH_VERSION=1.$((MINOR_VERSION + 1)).$((API_PATCH + 1))
echo "CURRENT_API_VERSION=${CURRENT_API_VERSION}" >> "${GITHUB_ENV}"
echo "NEXT_API_PATCH_VERSION=${NEXT_API_PATCH_VERSION}" >> "${GITHUB_ENV}"
echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"
echo "Prowler release version: ${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}"
echo "Current API version: $CURRENT_API_VERSION"
echo "Next API patch version: $NEXT_API_PATCH_VERSION"
echo "Target branch: $VERSION_BRANCH"
else
echo "::error::Invalid API version format: $CURRENT_API_VERSION"
exit 1
fi
- name: Bump API versions in files for version branch
run: |
set -e
sed -i "s|version = \"${CURRENT_API_VERSION}\"|version = \"${NEXT_API_PATCH_VERSION}\"|" api/pyproject.toml
sed -i "s|spectacular_settings.VERSION = \"${CURRENT_API_VERSION}\"|spectacular_settings.VERSION = \"${NEXT_API_PATCH_VERSION}\"|" api/src/backend/api/v1/views.py
sed -i "s| version: ${CURRENT_API_VERSION}| version: ${NEXT_API_PATCH_VERSION}|" api/src/backend/api/specs/v1.yaml
echo "Files modified:"
git --no-pager diff
- name: Create PR for next API patch version to version branch
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: ${{ env.VERSION_BRANCH }}
commit-message: 'chore(api): Bump version to v${{ env.NEXT_API_PATCH_VERSION }}'
branch: api-version-bump-to-v${{ env.NEXT_API_PATCH_VERSION }}
title: 'chore(api): Bump version to v${{ env.NEXT_API_PATCH_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Bump Prowler API version to v${{ env.NEXT_API_PATCH_VERSION }} after releasing Prowler v${{ env.PROWLER_VERSION }}.
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
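For reference, the release-type detection that this deleted workflow performed is plain bash; a standalone sketch of the same regex and classification (hypothetical script, not part of the repository):

#!/usr/bin/env bash
# Classify a release tag as minor (X.Y.0) or patch (X.Y.Z with Z > 0),
# mirroring the detect-release-type step in the deleted workflow.
PROWLER_VERSION="${1:?usage: $0 X.Y.Z}"
if [[ $PROWLER_VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
  MAJOR=${BASH_REMATCH[1]}; MINOR=${BASH_REMATCH[2]}; PATCH=${BASH_REMATCH[3]}
  if (( PATCH == 0 )); then
    echo "minor release: ${MAJOR}.${MINOR}.0"
  else
    echo "patch release: ${MAJOR}.${MINOR}.${PATCH}"
  fi
else
  echo "invalid version syntax: '${PROWLER_VERSION}' (must be X.Y.Z)" >&2
  exit 1
fi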
+1 -1
@@ -33,7 +33,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for API changes
id: check-changes
+3 -3
@@ -42,15 +42,15 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Initialize CodeQL
uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
with:
languages: ${{ matrix.language }}
config-file: ./.github/codeql/api-codeql-config.yml
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
with:
category: '/language:${{ matrix.language }}'
@@ -57,7 +57,7 @@ jobs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
@@ -93,7 +93,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Login to DockerHub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
@@ -170,7 +170,7 @@ jobs:
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
@@ -207,7 +207,7 @@ jobs:
steps:
- name: Trigger API deployment
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
repository: ${{ secrets.CLOUD_DISPATCH }}
+2 -2
@@ -28,7 +28,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check if Dockerfile changed
id: dockerfile-changed
@@ -63,7 +63,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for API changes
id: check-changes
+1 -1
@@ -33,7 +33,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for API changes
id: check-changes
+1 -1
@@ -73,7 +73,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for API changes
id: check-changes
-247
@@ -1,247 +0,0 @@
name: 'Docs: Bump Version'
on:
release:
types:
- 'published'
concurrency:
group: ${{ github.workflow }}-${{ github.event.release.tag_name }}
cancel-in-progress: false
env:
PROWLER_VERSION: ${{ github.event.release.tag_name }}
BASE_BRANCH: master
jobs:
detect-release-type:
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
contents: read
outputs:
is_minor: ${{ steps.detect.outputs.is_minor }}
is_patch: ${{ steps.detect.outputs.is_patch }}
major_version: ${{ steps.detect.outputs.major_version }}
minor_version: ${{ steps.detect.outputs.minor_version }}
patch_version: ${{ steps.detect.outputs.patch_version }}
current_docs_version: ${{ steps.get_docs_version.outputs.current_docs_version }}
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Get current documentation version
id: get_docs_version
run: |
CURRENT_DOCS_VERSION=$(grep -oP 'PROWLER_UI_VERSION="\K[^"]+' docs/getting-started/installation/prowler-app.mdx)
echo "current_docs_version=${CURRENT_DOCS_VERSION}" >> "${GITHUB_OUTPUT}"
echo "Current documentation version: $CURRENT_DOCS_VERSION"
- name: Detect release type and parse version
id: detect
run: |
if [[ $PROWLER_VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
MAJOR_VERSION=${BASH_REMATCH[1]}
MINOR_VERSION=${BASH_REMATCH[2]}
PATCH_VERSION=${BASH_REMATCH[3]}
echo "major_version=${MAJOR_VERSION}" >> "${GITHUB_OUTPUT}"
echo "minor_version=${MINOR_VERSION}" >> "${GITHUB_OUTPUT}"
echo "patch_version=${PATCH_VERSION}" >> "${GITHUB_OUTPUT}"
if (( MAJOR_VERSION != 5 )); then
echo "::error::Releasing another Prowler major version, aborting..."
exit 1
fi
if (( PATCH_VERSION == 0 )); then
echo "is_minor=true" >> "${GITHUB_OUTPUT}"
echo "is_patch=false" >> "${GITHUB_OUTPUT}"
echo "✓ Minor release detected: $PROWLER_VERSION"
else
echo "is_minor=false" >> "${GITHUB_OUTPUT}"
echo "is_patch=true" >> "${GITHUB_OUTPUT}"
echo "✓ Patch release detected: $PROWLER_VERSION"
fi
else
echo "::error::Invalid version syntax: '$PROWLER_VERSION' (must be X.Y.Z)"
exit 1
fi
bump-minor-version:
needs: detect-release-type
if: needs.detect-release-type.outputs.is_minor == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Calculate next minor version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
CURRENT_DOCS_VERSION="${{ needs.detect-release-type.outputs.current_docs_version }}"
NEXT_MINOR_VERSION=${MAJOR_VERSION}.$((MINOR_VERSION + 1)).0
echo "CURRENT_DOCS_VERSION=${CURRENT_DOCS_VERSION}" >> "${GITHUB_ENV}"
echo "NEXT_MINOR_VERSION=${NEXT_MINOR_VERSION}" >> "${GITHUB_ENV}"
echo "Current documentation version: $CURRENT_DOCS_VERSION"
echo "Current release version: $PROWLER_VERSION"
echo "Next minor version: $NEXT_MINOR_VERSION"
- name: Bump versions in documentation for master
run: |
set -e
# Update prowler-app.mdx with current release version
sed -i "s|PROWLER_UI_VERSION=\"${CURRENT_DOCS_VERSION}\"|PROWLER_UI_VERSION=\"${PROWLER_VERSION}\"|" docs/getting-started/installation/prowler-app.mdx
sed -i "s|PROWLER_API_VERSION=\"${CURRENT_DOCS_VERSION}\"|PROWLER_API_VERSION=\"${PROWLER_VERSION}\"|" docs/getting-started/installation/prowler-app.mdx
echo "Files modified:"
git --no-pager diff
- name: Create PR for documentation update to master
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: master
commit-message: 'docs: Update version to v${{ env.PROWLER_VERSION }}'
branch: docs-version-update-to-v${{ env.PROWLER_VERSION }}
title: 'docs: Update version to v${{ env.PROWLER_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Update Prowler documentation version references to v${{ env.PROWLER_VERSION }} after releasing Prowler v${{ env.PROWLER_VERSION }}.
### Files Updated
- `docs/getting-started/installation/prowler-app.mdx`: `PROWLER_UI_VERSION` and `PROWLER_API_VERSION`
- All `*.mdx` files with `<VersionBadge>` components
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
- name: Checkout version branch
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}
- name: Calculate first patch version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
CURRENT_DOCS_VERSION="${{ needs.detect-release-type.outputs.current_docs_version }}"
FIRST_PATCH_VERSION=${MAJOR_VERSION}.${MINOR_VERSION}.1
VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
echo "CURRENT_DOCS_VERSION=${CURRENT_DOCS_VERSION}" >> "${GITHUB_ENV}"
echo "FIRST_PATCH_VERSION=${FIRST_PATCH_VERSION}" >> "${GITHUB_ENV}"
echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"
echo "First patch version: $FIRST_PATCH_VERSION"
echo "Version branch: $VERSION_BRANCH"
- name: Bump versions in documentation for version branch
run: |
set -e
# Update prowler-app.mdx with current release version
sed -i "s|PROWLER_UI_VERSION=\"${CURRENT_DOCS_VERSION}\"|PROWLER_UI_VERSION=\"${PROWLER_VERSION}\"|" docs/getting-started/installation/prowler-app.mdx
sed -i "s|PROWLER_API_VERSION=\"${CURRENT_DOCS_VERSION}\"|PROWLER_API_VERSION=\"${PROWLER_VERSION}\"|" docs/getting-started/installation/prowler-app.mdx
echo "Files modified:"
git --no-pager diff
- name: Create PR for documentation update to version branch
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: ${{ env.VERSION_BRANCH }}
commit-message: 'docs: Update version to v${{ env.PROWLER_VERSION }}'
branch: docs-version-update-to-v${{ env.PROWLER_VERSION }}-branch
title: 'docs: Update version to v${{ env.PROWLER_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Update Prowler documentation version references to v${{ env.PROWLER_VERSION }} in version branch after releasing Prowler v${{ env.PROWLER_VERSION }}.
### Files Updated
- `docs/getting-started/installation/prowler-app.mdx`: `PROWLER_UI_VERSION` and `PROWLER_API_VERSION`
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
bump-patch-version:
needs: detect-release-type
if: needs.detect-release-type.outputs.is_patch == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Calculate next patch version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
PATCH_VERSION=${{ needs.detect-release-type.outputs.patch_version }}
CURRENT_DOCS_VERSION="${{ needs.detect-release-type.outputs.current_docs_version }}"
NEXT_PATCH_VERSION=${MAJOR_VERSION}.${MINOR_VERSION}.$((PATCH_VERSION + 1))
VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
echo "CURRENT_DOCS_VERSION=${CURRENT_DOCS_VERSION}" >> "${GITHUB_ENV}"
echo "NEXT_PATCH_VERSION=${NEXT_PATCH_VERSION}" >> "${GITHUB_ENV}"
echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"
echo "Current documentation version: $CURRENT_DOCS_VERSION"
echo "Current release version: $PROWLER_VERSION"
echo "Next patch version: $NEXT_PATCH_VERSION"
echo "Target branch: $VERSION_BRANCH"
- name: Bump versions in documentation for patch version
run: |
set -e
# Update prowler-app.mdx with current release version
sed -i "s|PROWLER_UI_VERSION=\"${CURRENT_DOCS_VERSION}\"|PROWLER_UI_VERSION=\"${PROWLER_VERSION}\"|" docs/getting-started/installation/prowler-app.mdx
sed -i "s|PROWLER_API_VERSION=\"${CURRENT_DOCS_VERSION}\"|PROWLER_API_VERSION=\"${PROWLER_VERSION}\"|" docs/getting-started/installation/prowler-app.mdx
echo "Files modified:"
git --no-pager diff
- name: Create PR for documentation update to version branch
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: ${{ env.VERSION_BRANCH }}
commit-message: 'docs: Update version to v${{ env.PROWLER_VERSION }}'
branch: docs-version-update-to-v${{ env.PROWLER_VERSION }}
title: 'docs: Update version to v${{ env.PROWLER_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Update Prowler documentation version references to v${{ env.PROWLER_VERSION }} after releasing Prowler v${{ env.PROWLER_VERSION }}.
### Files Updated
- `docs/getting-started/installation/prowler-app.mdx`: `PROWLER_UI_VERSION` and `PROWLER_API_VERSION`
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
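All three bump jobs in this deleted workflow share one idiom: sed replaces the exact current value, so a drifted file produces an empty diff rather than a wrong edit. A minimal sketch with illustrative versions (assumes GNU sed's -i, as on the ubuntu-latest runners):

CURRENT_DOCS_VERSION="5.16.0"   # illustrative current value
PROWLER_VERSION="5.17.0"        # illustrative release tag
sed -i "s|PROWLER_UI_VERSION=\"${CURRENT_DOCS_VERSION}\"|PROWLER_UI_VERSION=\"${PROWLER_VERSION}\"|" docs/getting-started/installation/prowler-app.mdx
git --no-pager diff             # confirm only the intended line changed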
+2 -2
@@ -23,11 +23,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
- name: Scan for secrets with TruffleHog
uses: trufflesecurity/trufflehog@aade3bff5594fe8808578dd4db3dfeae9bf2abdc # v3.91.1
uses: trufflesecurity/trufflehog@b84c3d14d189e16da175e2c27fa8136603783ffc # v3.90.12
with:
extra_args: '--results=verified,unknown'
@@ -56,7 +56,7 @@ jobs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
@@ -91,7 +91,7 @@ jobs:
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Login to DockerHub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
@@ -176,7 +176,7 @@ jobs:
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
@@ -213,7 +213,7 @@ jobs:
steps:
- name: Trigger MCP deployment
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
repository: ${{ secrets.CLOUD_DISPATCH }}
+2 -2
@@ -28,7 +28,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check if Dockerfile changed
id: dockerfile-changed
@@ -62,7 +62,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for MCP changes
id: check-changes
-81
@@ -1,81 +0,0 @@
name: "MCP: PyPI Release"
on:
release:
types:
- "published"
concurrency:
group: ${{ github.workflow }}-${{ github.event.release.tag_name }}
cancel-in-progress: false
env:
RELEASE_TAG: ${{ github.event.release.tag_name }}
PYTHON_VERSION: "3.12"
WORKING_DIRECTORY: ./mcp_server
jobs:
validate-release:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
contents: read
outputs:
prowler_version: ${{ steps.parse-version.outputs.version }}
major_version: ${{ steps.parse-version.outputs.major }}
steps:
- name: Parse and validate version
id: parse-version
run: |
PROWLER_VERSION="${{ env.RELEASE_TAG }}"
echo "version=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"
# Extract major version
MAJOR_VERSION="${PROWLER_VERSION%%.*}"
echo "major=${MAJOR_VERSION}" >> "${GITHUB_OUTPUT}"
# Validate major version (only Prowler 3, 4, 5 supported)
case ${MAJOR_VERSION} in
3|4|5)
echo "✓ Releasing Prowler MCP for tag ${PROWLER_VERSION}"
;;
*)
echo "::error::Unsupported Prowler major version: ${MAJOR_VERSION}"
exit 1
;;
esac
publish-prowler-mcp:
needs: validate-release
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
id-token: write
environment:
name: pypi-prowler-mcp
url: https://pypi.org/project/prowler-mcp/
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install uv
uses: astral-sh/setup-uv@v7
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Build prowler-mcp package
working-directory: ${{ env.WORKING_DIRECTORY }}
run: uv build
- name: Publish prowler-mcp package to PyPI
uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0
with:
packages-dir: ${{ env.WORKING_DIRECTORY }}/dist/
print-hash: true
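For reference, the validate-release gate above is bash parameter expansion plus a case statement; a sketch with an illustrative tag:

RELEASE_TAG="5.12.2"                 # illustrative tag value
MAJOR_VERSION="${RELEASE_TAG%%.*}"   # drop everything from the first dot
case ${MAJOR_VERSION} in
  3|4|5) echo "✓ Releasing Prowler MCP for tag ${RELEASE_TAG}" ;;
  *) echo "::error::Unsupported Prowler major version: ${MAJOR_VERSION}"; exit 1 ;;
esac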
+1 -1
@@ -29,7 +29,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
+1 -1
@@ -25,7 +25,7 @@ jobs:
steps:
- name: Checkout PR head
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
+2 -5
@@ -13,10 +13,7 @@ concurrency:
jobs:
trigger-cloud-pull-request:
if: |
github.event.pull_request.merged == true &&
github.repository == 'prowler-cloud/prowler' &&
!contains(github.event.pull_request.labels.*.name, 'skip-sync')
if: github.event.pull_request.merged == true && github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
timeout-minutes: 10
permissions:
@@ -29,7 +26,7 @@ jobs:
echo "SHORT_SHA=${SHORT_SHA::7}" >> $GITHUB_ENV
- name: Trigger Cloud repository pull request
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
repository: ${{ secrets.CLOUD_DISPATCH }}
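The SHORT_SHA step above uses bash substring expansion; a quick sketch (illustrative SHA):

SHORT_SHA="f34a025acc0000000000000000000000deadbeef"   # illustrative 40-char SHA
echo "${SHORT_SHA::7}"   # prints f34a025, the first seven characters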
+2 -2
@@ -27,13 +27,13 @@ jobs:
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
- name: Set up Python
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: '3.12'
+9 -6
@@ -67,7 +67,7 @@ jobs:
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Calculate next minor version
run: |
@@ -86,6 +86,7 @@ jobs:
sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${NEXT_MINOR_VERSION}\"|" pyproject.toml
sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${NEXT_MINOR_VERSION}\"|" prowler/config/config.py
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_MINOR_VERSION}|" .env
echo "Files modified:"
git --no-pager diff
@@ -99,7 +100,7 @@ jobs:
commit-message: 'chore(release): Bump version to v${{ env.NEXT_MINOR_VERSION }}'
branch: version-bump-to-v${{ env.NEXT_MINOR_VERSION }}
title: 'chore(release): Bump version to v${{ env.NEXT_MINOR_VERSION }}'
labels: no-changelog,skip-sync
labels: no-changelog
body: |
### Description
@@ -110,7 +111,7 @@ jobs:
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
- name: Checkout version branch
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}
@@ -134,6 +135,7 @@ jobs:
sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${FIRST_PATCH_VERSION}\"|" pyproject.toml
sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${FIRST_PATCH_VERSION}\"|" prowler/config/config.py
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${FIRST_PATCH_VERSION}|" .env
echo "Files modified:"
git --no-pager diff
@@ -147,7 +149,7 @@ jobs:
commit-message: 'chore(release): Bump version to v${{ env.FIRST_PATCH_VERSION }}'
branch: version-bump-to-v${{ env.FIRST_PATCH_VERSION }}
title: 'chore(release): Bump version to v${{ env.FIRST_PATCH_VERSION }}'
labels: no-changelog,skip-sync
labels: no-changelog
body: |
### Description
@@ -167,7 +169,7 @@ jobs:
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Calculate next patch version
run: |
@@ -191,6 +193,7 @@ jobs:
sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${NEXT_PATCH_VERSION}\"|" pyproject.toml
sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${NEXT_PATCH_VERSION}\"|" prowler/config/config.py
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_PATCH_VERSION}|" .env
echo "Files modified:"
git --no-pager diff
@@ -204,7 +207,7 @@ jobs:
commit-message: 'chore(release): Bump version to v${{ env.NEXT_PATCH_VERSION }}'
branch: version-bump-to-v${{ env.NEXT_PATCH_VERSION }}
title: 'chore(release): Bump version to v${{ env.NEXT_PATCH_VERSION }}'
labels: no-changelog,skip-sync
labels: no-changelog
body: |
### Description
+2 -2
@@ -31,7 +31,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for SDK changes
id: check-changes
@@ -62,7 +62,7 @@ jobs:
- name: Set up Python ${{ matrix.python-version }}
if: steps.check-changes.outputs.any_changed == 'true'
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ matrix.python-version }}
cache: 'poetry'
+3 -3
@@ -49,15 +49,15 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Initialize CodeQL
uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
with:
languages: ${{ matrix.language }}
config-file: ./.github/codeql/sdk-codeql-config.yml
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
with:
category: '/language:${{ matrix.language }}'
@@ -61,10 +61,10 @@ jobs:
stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -115,7 +115,7 @@ jobs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
@@ -151,7 +151,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Login to DockerHub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
@@ -252,7 +252,7 @@ jobs:
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
@@ -294,7 +294,7 @@ jobs:
- name: Dispatch v3 deployment (latest)
if: github.event_name == 'push'
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
@@ -303,7 +303,7 @@ jobs:
- name: Dispatch v3 deployment (release)
if: github.event_name == 'release'
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
+2 -2
@@ -27,7 +27,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check if Dockerfile changed
id: dockerfile-changed
@@ -62,7 +62,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for SDK changes
id: check-changes
+4 -4
@@ -59,13 +59,13 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Poetry
run: pipx install poetry==2.1.1
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'poetry'
@@ -91,13 +91,13 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install Poetry
run: pipx install poetry==2.1.1
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'poetry'
@@ -25,12 +25,12 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
ref: 'master'
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip'
@@ -39,7 +39,7 @@ jobs:
run: pip install boto3
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1
uses: aws-actions/configure-aws-credentials@00943011d9042930efac3dcd3a170e4273319bc8 # v5.1.0
with:
aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ secrets.DEV_IAM_ROLE_ARN }}
+2 -2
@@ -24,7 +24,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for SDK changes
id: check-changes
@@ -55,7 +55,7 @@ jobs:
- name: Set up Python 3.12
if: steps.check-changes.outputs.any_changed == 'true'
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: '3.12'
cache: 'poetry'
+2 -2
@@ -31,7 +31,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for SDK changes
id: check-changes
@@ -62,7 +62,7 @@ jobs:
- name: Set up Python ${{ matrix.python-version }}
if: steps.check-changes.outputs.any_changed == 'true'
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ matrix.python-version }}
cache: 'poetry'
-221
@@ -1,221 +0,0 @@
name: 'UI: Bump Version'
on:
release:
types:
- 'published'
concurrency:
group: ${{ github.workflow }}-${{ github.event.release.tag_name }}
cancel-in-progress: false
env:
PROWLER_VERSION: ${{ github.event.release.tag_name }}
BASE_BRANCH: master
jobs:
detect-release-type:
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
contents: read
outputs:
is_minor: ${{ steps.detect.outputs.is_minor }}
is_patch: ${{ steps.detect.outputs.is_patch }}
major_version: ${{ steps.detect.outputs.major_version }}
minor_version: ${{ steps.detect.outputs.minor_version }}
patch_version: ${{ steps.detect.outputs.patch_version }}
steps:
- name: Detect release type and parse version
id: detect
run: |
if [[ $PROWLER_VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
MAJOR_VERSION=${BASH_REMATCH[1]}
MINOR_VERSION=${BASH_REMATCH[2]}
PATCH_VERSION=${BASH_REMATCH[3]}
echo "major_version=${MAJOR_VERSION}" >> "${GITHUB_OUTPUT}"
echo "minor_version=${MINOR_VERSION}" >> "${GITHUB_OUTPUT}"
echo "patch_version=${PATCH_VERSION}" >> "${GITHUB_OUTPUT}"
if (( MAJOR_VERSION != 5 )); then
echo "::error::Releasing another Prowler major version, aborting..."
exit 1
fi
if (( PATCH_VERSION == 0 )); then
echo "is_minor=true" >> "${GITHUB_OUTPUT}"
echo "is_patch=false" >> "${GITHUB_OUTPUT}"
echo "✓ Minor release detected: $PROWLER_VERSION"
else
echo "is_minor=false" >> "${GITHUB_OUTPUT}"
echo "is_patch=true" >> "${GITHUB_OUTPUT}"
echo "✓ Patch release detected: $PROWLER_VERSION"
fi
else
echo "::error::Invalid version syntax: '$PROWLER_VERSION' (must be X.Y.Z)"
exit 1
fi
bump-minor-version:
needs: detect-release-type
if: needs.detect-release-type.outputs.is_minor == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Calculate next minor version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
NEXT_MINOR_VERSION=${MAJOR_VERSION}.$((MINOR_VERSION + 1)).0
echo "NEXT_MINOR_VERSION=${NEXT_MINOR_VERSION}" >> "${GITHUB_ENV}"
echo "Current version: $PROWLER_VERSION"
echo "Next minor version: $NEXT_MINOR_VERSION"
- name: Bump UI version in .env for master
run: |
set -e
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_MINOR_VERSION}|" .env
echo "Files modified:"
git --no-pager diff
- name: Create PR for next minor version to master
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: master
commit-message: 'chore(ui): Bump version to v${{ env.NEXT_MINOR_VERSION }}'
branch: ui-version-bump-to-v${{ env.NEXT_MINOR_VERSION }}
title: 'chore(ui): Bump version to v${{ env.NEXT_MINOR_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Bump Prowler UI version to v${{ env.NEXT_MINOR_VERSION }} after releasing Prowler v${{ env.PROWLER_VERSION }}.
### Files Updated
- `.env`: `NEXT_PUBLIC_PROWLER_RELEASE_VERSION`
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
- name: Checkout version branch
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}
- name: Calculate first patch version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
FIRST_PATCH_VERSION=${MAJOR_VERSION}.${MINOR_VERSION}.1
VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
echo "FIRST_PATCH_VERSION=${FIRST_PATCH_VERSION}" >> "${GITHUB_ENV}"
echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"
echo "First patch version: $FIRST_PATCH_VERSION"
echo "Version branch: $VERSION_BRANCH"
- name: Bump UI version in .env for version branch
run: |
set -e
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${FIRST_PATCH_VERSION}|" .env
echo "Files modified:"
git --no-pager diff
- name: Create PR for first patch version to version branch
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: ${{ env.VERSION_BRANCH }}
commit-message: 'chore(ui): Bump version to v${{ env.FIRST_PATCH_VERSION }}'
branch: ui-version-bump-to-v${{ env.FIRST_PATCH_VERSION }}
title: 'chore(ui): Bump version to v${{ env.FIRST_PATCH_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Bump Prowler UI version to v${{ env.FIRST_PATCH_VERSION }} in version branch after releasing Prowler v${{ env.PROWLER_VERSION }}.
### Files Updated
- `.env`: `NEXT_PUBLIC_PROWLER_RELEASE_VERSION`
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
bump-patch-version:
needs: detect-release-type
if: needs.detect-release-type.outputs.is_patch == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Calculate next patch version
run: |
MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
PATCH_VERSION=${{ needs.detect-release-type.outputs.patch_version }}
NEXT_PATCH_VERSION=${MAJOR_VERSION}.${MINOR_VERSION}.$((PATCH_VERSION + 1))
VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
echo "NEXT_PATCH_VERSION=${NEXT_PATCH_VERSION}" >> "${GITHUB_ENV}"
echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"
echo "Current version: $PROWLER_VERSION"
echo "Next patch version: $NEXT_PATCH_VERSION"
echo "Target branch: $VERSION_BRANCH"
- name: Bump UI version in .env for version branch
run: |
set -e
sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_PATCH_VERSION}|" .env
echo "Files modified:"
git --no-pager diff
- name: Create PR for next patch version to version branch
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
base: ${{ env.VERSION_BRANCH }}
commit-message: 'chore(ui): Bump version to v${{ env.NEXT_PATCH_VERSION }}'
branch: ui-version-bump-to-v${{ env.NEXT_PATCH_VERSION }}
title: 'chore(ui): Bump version to v${{ env.NEXT_PATCH_VERSION }}'
labels: no-changelog,skip-sync
body: |
### Description
Bump Prowler UI version to v${{ env.NEXT_PATCH_VERSION }} after releasing Prowler v${{ env.PROWLER_VERSION }}.
### Files Updated
- `.env`: `NEXT_PUBLIC_PROWLER_RELEASE_VERSION`
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
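All of these bump workflows pass values between steps and jobs the same way: by appending KEY=VALUE lines to the files GitHub Actions exposes through GITHUB_OUTPUT and GITHUB_ENV. A sketch of the pattern (only meaningful inside a runner, where those variables are set):

echo "is_minor=true" >> "${GITHUB_OUTPUT}"            # read back as steps.<step-id>.outputs.is_minor
echo "NEXT_MINOR_VERSION=5.17.0" >> "${GITHUB_ENV}"   # read back as env.NEXT_MINOR_VERSION in later steps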
+3 -3
@@ -45,15 +45,15 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Initialize CodeQL
uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
with:
languages: ${{ matrix.language }}
config-file: ./.github/codeql/ui-codeql-config.yml
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
with:
category: '/language:${{ matrix.language }}'
@@ -59,7 +59,7 @@ jobs:
message-ts: ${{ steps.slack-notification.outputs.ts }}
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Notify container push started
id: slack-notification
@@ -95,7 +95,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Login to DockerHub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
@@ -175,7 +175,7 @@ jobs:
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Determine overall outcome
id: outcome
@@ -212,7 +212,7 @@ jobs:
steps:
- name: Trigger UI deployment
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
repository: ${{ secrets.CLOUD_DISPATCH }}
+2 -2
@@ -28,7 +28,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check if Dockerfile changed
id: dockerfile-changed
@@ -63,7 +63,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for UI changes
id: check-changes
+1 -1
@@ -54,7 +54,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Create k8s Kind Cluster
uses: helm/kind-action@v1
with:
+1 -1
@@ -30,7 +30,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Check for UI changes
id: check-changes
+4 -4
@@ -47,12 +47,12 @@ help: ## Show this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Build no cache
build-no-cache-dev:
docker compose -f docker-compose-dev.yml build --no-cache api-dev worker-dev worker-beat mcp-server
build-no-cache-dev:
docker compose -f docker-compose-dev.yml build --no-cache api-dev worker-dev worker-beat
##@ Development Environment
run-api-dev: ## Start development environment with API, PostgreSQL, Valkey, MCP, and workers
docker compose -f docker-compose-dev.yml up api-dev postgres valkey worker-dev worker-beat mcp-server
run-api-dev: ## Start development environment with API, PostgreSQL, Valkey, and workers
docker compose -f docker-compose-dev.yml up api-dev postgres valkey worker-dev worker-beat
##@ Development Environment
build-and-run-api-dev: build-no-cache-dev run-api-dev
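Usage is unchanged by this edit; only the service list shrinks:

make build-no-cache-dev        # rebuild api-dev, worker-dev and worker-beat without cache
make run-api-dev               # start API, PostgreSQL, Valkey and workers
make build-and-run-api-dev     # both of the above in sequence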
+1 -2
@@ -277,12 +277,11 @@ python prowler-cli.py -v
# ✏️ High level architecture
## Prowler App
**Prowler App** is composed of four key components:
**Prowler App** is composed of three key components:
- **Prowler UI**: A web-based interface, built with Next.js, providing a user-friendly experience for executing Prowler scans and visualizing results.
- **Prowler API**: A backend service, developed with Django REST Framework, responsible for running Prowler scans and storing the generated results.
- **Prowler SDK**: A Python SDK designed to extend the functionality of the Prowler CLI for advanced capabilities.
- **Prowler MCP Server**: A Model Context Protocol server that provides AI tools for Lighthouse, the AI-powered security assistant. This is a critical dependency for Lighthouse functionality.
![Prowler App Architecture](docs/products/img/prowler-app-architecture.png)
+2 -21
@@ -2,29 +2,11 @@
All notable changes to the **Prowler API** are documented in this file.
## [1.18.0] (Prowler UNRELEASED)
### Added
- Support AlibabaCloud provider [(#9485)](https://github.com/prowler-cloud/prowler/pull/9485)
---
## [1.17.1] (Prowler v5.16.1)
### Changed
- Security Hub integration error when no regions [(#9635)](https://github.com/prowler-cloud/prowler/pull/9635)
### Fixed
- Orphan scheduled scans caused by transaction isolation during provider creation [(#9633)](https://github.com/prowler-cloud/prowler/pull/9633)
---
## [1.17.0] (Prowler v5.16.0)
## [1.17.0] (Prowler UNRELEASED)
### Added
- New endpoint to retrieve an overview of the categories based on finding severities [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)
- Endpoints `GET /findings` and `GET /findings/latests` can now use the category filter [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)
- Account id, alias and provider name to PDF reporting table [(#9574)](https://github.com/prowler-cloud/prowler/pull/9574)
### Changed
- Endpoint `GET /overviews/attack-surfaces` no longer returns the related check IDs [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)
@@ -32,8 +14,7 @@ All notable changes to the **Prowler API** are documented in this file.
- Increased execution delay for the first scheduled scan tasks to 5 seconds [(#9558)](https://github.com/prowler-cloud/prowler/pull/9558)
### Fixed
- Made `scan_id` a required filter in the compliance overview endpoint [(#9560)](https://github.com/prowler-cloud/prowler/pull/9560)
- Reduced unnecessary UPDATE resources operations by only saving when tag mappings change, lowering write load during scans [(#9569)](https://github.com/prowler-cloud/prowler/pull/9569)
- Make `scan_id` a required filter in the compliance overview endpoint [(#9560)](https://github.com/prowler-cloud/prowler/pull/9560)
---
+1910 -2961
File diff suppressed because it is too large.
+1 -1
@@ -44,7 +44,7 @@ name = "prowler-api"
package-mode = false
# Needed for the SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.18.0"
version = "1.16.0"
[project.scripts]
celery = "src.backend.config.settings.celery"
@@ -26,11 +26,8 @@ class Migration(migrations.Migration):
),
),
(
"tenant",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="api.tenant",
),
"tenant_id",
models.UUIDField(db_index=True, editable=False),
),
(
"inserted_at",
@@ -59,6 +56,7 @@ class Migration(migrations.Migration):
("low", "Low"),
("informational", "Informational"),
],
max_length=50,
),
),
(
@@ -84,7 +82,6 @@ class Migration(migrations.Migration):
],
options={
"db_table": "scan_category_summaries",
"abstract": False,
},
),
migrations.AddIndex(
@@ -16,7 +16,6 @@ class Migration(migrations.Migration):
blank=True,
null=True,
size=None,
help_text="Categories from check metadata for efficient filtering",
),
),
]
@@ -1,37 +0,0 @@
# Generated by Django migration for Alibaba Cloud provider support
from django.db import migrations
import api.db_utils
class Migration(migrations.Migration):
dependencies = [
("api", "0064_finding_categories"),
]
operations = [
migrations.AlterField(
model_name="provider",
name="provider",
field=api.db_utils.ProviderEnumField(
choices=[
("aws", "AWS"),
("azure", "Azure"),
("gcp", "GCP"),
("kubernetes", "Kubernetes"),
("m365", "M365"),
("github", "GitHub"),
("mongodbatlas", "MongoDB Atlas"),
("iac", "IaC"),
("oraclecloud", "Oracle Cloud Infrastructure"),
("alibabacloud", "Alibaba Cloud"),
],
default="aws",
),
),
migrations.RunSQL(
"ALTER TYPE provider ADD VALUE IF NOT EXISTS 'alibabacloud';",
reverse_sql=migrations.RunSQL.noop,
),
]
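The RunSQL statement in this deleted migration is idempotent on the database side. A sketch of the same statement applied directly (illustrative only: in practice it ran through Django migrations, and DATABASE_URL is an assumed connection string):

psql "$DATABASE_URL" -c "ALTER TYPE provider ADD VALUE IF NOT EXISTS 'alibabacloud';"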
+4 -19
@@ -287,7 +287,6 @@ class Provider(RowLevelSecurityProtectedModel):
MONGODBATLAS = "mongodbatlas", _("MongoDB Atlas")
IAC = "iac", _("IaC")
ORACLECLOUD = "oraclecloud", _("Oracle Cloud Infrastructure")
ALIBABACLOUD = "alibabacloud", _("Alibaba Cloud")
@staticmethod
def validate_aws_uid(value):
@@ -392,15 +391,6 @@ class Provider(RowLevelSecurityProtectedModel):
pointer="/data/attributes/uid",
)
@staticmethod
def validate_alibabacloud_uid(value):
if not re.match(r"^\d{16}$", value):
raise ModelValidationError(
detail="Alibaba Cloud account ID must be exactly 16 digits.",
code="alibabacloud-uid",
pointer="/data/attributes/uid",
)
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
@@ -726,19 +716,14 @@ class Resource(RowLevelSecurityProtectedModel):
self.clear_tags()
return
# Add new relationships with the tenant_id field; avoid touching the
# Resource row unless a mapping is actually created to prevent noisy
# updates during scans.
mapping_created = False
# Add new relationships with the tenant_id field
for tag in tags:
_, created = ResourceTagMapping.objects.update_or_create(
ResourceTagMapping.objects.update_or_create(
tag=tag, resource=self, tenant_id=self.tenant_id
)
mapping_created = mapping_created or created
if mapping_created:
# Only bump updated_at when the tag set truly changed
self.save(update_fields=["updated_at"])
# Save the instance
self.save()
class Meta(RowLevelSecurityProtectedModel.Meta):
db_table = "resources"
+1 -75
View File
@@ -1,7 +1,7 @@
openapi: 3.0.3
info:
title: Prowler API
version: 1.18.0
version: 1.17.0
description: |-
Prowler API specification.
@@ -894,7 +894,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -905,7 +904,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -923,7 +921,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -936,7 +933,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -1451,7 +1447,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -1462,7 +1457,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -1480,7 +1474,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -1493,7 +1486,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -1916,7 +1908,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -1927,7 +1918,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -1945,7 +1935,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -1958,7 +1947,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -2379,7 +2367,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -2390,7 +2377,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -2408,7 +2394,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -2421,7 +2406,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -2830,7 +2814,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -2841,7 +2824,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -2859,7 +2841,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -2872,7 +2853,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -4967,7 +4947,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -4978,7 +4957,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -4996,7 +4974,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -5009,7 +4986,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -5160,7 +5136,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -5171,7 +5146,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -5189,7 +5163,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -5202,7 +5175,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -5347,7 +5319,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -5358,7 +5329,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -5375,7 +5345,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -5388,7 +5357,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- name: filter[search]
@@ -5575,7 +5543,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -5586,7 +5553,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -5604,7 +5570,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -5617,7 +5582,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -5751,7 +5715,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -5762,7 +5725,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -5780,7 +5742,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -5793,7 +5754,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -6574,7 +6534,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -6585,7 +6544,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider__in]
schema:
@@ -6603,7 +6561,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -6616,7 +6573,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -6634,7 +6590,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -6645,7 +6600,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -6663,7 +6617,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -6676,7 +6629,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- name: filter[search]
@@ -7288,7 +7240,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -7299,7 +7250,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -7317,7 +7267,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -7330,7 +7279,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -7675,7 +7623,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -7686,7 +7633,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -7704,7 +7650,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -7717,7 +7662,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -7957,7 +7901,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -7968,7 +7911,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -7986,7 +7928,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -7999,7 +7940,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -8245,7 +8185,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -8256,7 +8195,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -8274,7 +8212,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -8287,7 +8224,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -9096,7 +9032,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
* `aws` - AWS
* `azure` - Azure
@@ -9107,7 +9042,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
- in: query
name: filter[provider_type__in]
schema:
@@ -9125,7 +9059,6 @@ paths:
- m365
- mongodbatlas
- oraclecloud
- alibabacloud
description: |-
Multiple values may be separated by commas.
@@ -9138,7 +9071,6 @@ paths:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
explode: false
style: form
- in: query
@@ -16634,7 +16566,6 @@ components:
- mongodbatlas
- iac
- oraclecloud
- alibabacloud
type: string
description: |-
* `aws` - AWS
@@ -16646,7 +16577,6 @@ components:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
x-spec-enum-id: eca8c51e6bd28935
uid:
type: string
@@ -16762,7 +16692,6 @@ components:
- mongodbatlas
- iac
- oraclecloud
- alibabacloud
type: string
x-spec-enum-id: eca8c51e6bd28935
description: |-
@@ -16777,7 +16706,6 @@ components:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
uid:
type: string
title: Unique identifier for the provider, set by the provider
@@ -16824,7 +16752,6 @@ components:
- mongodbatlas
- iac
- oraclecloud
- alibabacloud
type: string
x-spec-enum-id: eca8c51e6bd28935
description: |-
@@ -16839,7 +16766,6 @@ components:
* `mongodbatlas` - MongoDB Atlas
* `iac` - IaC
* `oraclecloud` - Oracle Cloud Infrastructure
* `alibabacloud` - Alibaba Cloud
uid:
type: string
minLength: 3
-2
View File
@@ -16,7 +16,6 @@ from api.utils import (
return_prowler_provider,
validate_invitation,
)
from prowler.providers.alibabacloud.alibabacloud_provider import AlibabacloudProvider
from prowler.providers.aws.aws_provider import AwsProvider
from prowler.providers.aws.lib.security_hub.security_hub import SecurityHubConnection
from prowler.providers.azure.azure_provider import AzureProvider
@@ -117,7 +116,6 @@ class TestReturnProwlerProvider:
(Provider.ProviderChoices.MONGODBATLAS.value, MongodbatlasProvider),
(Provider.ProviderChoices.ORACLECLOUD.value, OraclecloudProvider),
(Provider.ProviderChoices.IAC.value, IacProvider),
(Provider.ProviderChoices.ALIBABACLOUD.value, AlibabacloudProvider),
],
)
def test_return_prowler_provider(self, provider_type, expected_provider):
+4 -79
View File
@@ -1165,11 +1165,6 @@ class TestProviderViewSet:
"uid": "64b1d3c0e4b03b1234567890",
"alias": "Atlas Organization",
},
{
"provider": "alibabacloud",
"uid": "1234567890123456",
"alias": "Alibaba Cloud Account",
},
]
),
)
@@ -1519,36 +1514,6 @@ class TestProviderViewSet:
"mongodbatlas-uid",
"uid",
),
# Alibaba Cloud UID validation - too short (not 16 digits)
(
{
"provider": "alibabacloud",
"uid": "123456789012345",
"alias": "test",
},
"alibabacloud-uid",
"uid",
),
# Alibaba Cloud UID validation - too long (not 16 digits)
(
{
"provider": "alibabacloud",
"uid": "12345678901234567",
"alias": "test",
},
"alibabacloud-uid",
"uid",
),
# Alibaba Cloud UID validation - contains non-digits
(
{
"provider": "alibabacloud",
"uid": "123456789012345a",
"alias": "test",
},
"alibabacloud-uid",
"uid",
),
]
),
)
@@ -1722,21 +1687,21 @@ class TestProviderViewSet:
(
"uid.icontains",
"1",
8,
7,
),
("alias", "aws_testing_1", 1),
("alias.icontains", "aws", 2),
("inserted_at", TODAY, 9),
("inserted_at", TODAY, 8),
(
"inserted_at.gte",
"2024-01-01",
9,
8,
),
("inserted_at.lte", "2024-01-01", 0),
(
"updated_at.gte",
"2024-01-01",
9,
8,
),
("updated_at.lte", "2024-01-01", 0),
]
@@ -2286,46 +2251,6 @@ class TestProviderSecretViewSet:
"atlas_private_key": "private-key",
},
),
# Alibaba Cloud credentials (with access key only)
(
Provider.ProviderChoices.ALIBABACLOUD.value,
ProviderSecret.TypeChoices.STATIC,
{
"access_key_id": "LTAI5t1234567890abcdef",
"access_key_secret": "my-secret-access-key",
},
),
# Alibaba Cloud credentials (with STS security token)
(
Provider.ProviderChoices.ALIBABACLOUD.value,
ProviderSecret.TypeChoices.STATIC,
{
"access_key_id": "LTAI5t1234567890abcdef",
"access_key_secret": "my-secret-access-key",
"security_token": "my-security-token-for-sts",
},
),
# Alibaba Cloud RAM Role Assumption (minimal required fields)
(
Provider.ProviderChoices.ALIBABACLOUD.value,
ProviderSecret.TypeChoices.ROLE,
{
"role_arn": "acs:ram::1234567890123456:role/ProwlerRole",
"access_key_id": "LTAI5t1234567890abcdef",
"access_key_secret": "my-secret-access-key",
},
),
# Alibaba Cloud RAM Role Assumption (with optional role_session_name)
(
Provider.ProviderChoices.ALIBABACLOUD.value,
ProviderSecret.TypeChoices.ROLE,
{
"role_arn": "acs:ram::1234567890123456:role/ProwlerRole",
"access_key_id": "LTAI5t1234567890abcdef",
"access_key_secret": "my-secret-access-key",
"role_session_name": "ProwlerAuditSession",
},
),
],
)
def test_provider_secrets_create_valid(
+8 -12
View File
@@ -11,7 +11,6 @@ from api.exceptions import InvitationTokenExpiredException
from api.models import Integration, Invitation, Processor, Provider, Resource
from api.v1.serializers import FindingMetadataSerializer
from prowler.lib.outputs.jira.jira import Jira, JiraBasicAuthError
from prowler.providers.alibabacloud.alibabacloud_provider import AlibabacloudProvider
from prowler.providers.aws.aws_provider import AwsProvider
from prowler.providers.aws.lib.s3.s3 import S3
from prowler.providers.aws.lib.security_hub.security_hub import SecurityHub
@@ -64,9 +63,8 @@ def merge_dicts(default_dict: dict, replacement_dict: dict) -> dict:
def return_prowler_provider(
provider: Provider,
) -> (
AlibabacloudProvider
| AwsProvider
) -> (
AwsProvider
| AzureProvider
| GcpProvider
| GithubProvider
@@ -75,14 +73,14 @@ def return_prowler_provider(
| M365Provider
| MongodbatlasProvider
| OraclecloudProvider
):
):
"""Return the Prowler provider class based on the given provider type.
Args:
provider (Provider): The provider object containing the provider type and associated secrets.
Returns:
AlibabacloudProvider | AwsProvider | AzureProvider | GcpProvider | GithubProvider | IacProvider | KubernetesProvider | M365Provider | MongodbatlasProvider | OraclecloudProvider: The corresponding provider class.
AwsProvider | AzureProvider | GcpProvider | GithubProvider | IacProvider | KubernetesProvider | M365Provider | OraclecloudProvider | MongodbatlasProvider: The corresponding provider class.
Raises:
ValueError: If the provider type specified in `provider.provider` is not supported.
@@ -106,8 +104,6 @@ def return_prowler_provider(
prowler_provider = IacProvider
case Provider.ProviderChoices.ORACLECLOUD.value:
prowler_provider = OraclecloudProvider
case Provider.ProviderChoices.ALIBABACLOUD.value:
prowler_provider = AlibabacloudProvider
case _:
raise ValueError(f"Provider type {provider.provider} not supported")
return prowler_provider
@@ -173,8 +169,7 @@ def initialize_prowler_provider(
provider: Provider,
mutelist_processor: Processor | None = None,
) -> (
AlibabacloudProvider
| AwsProvider
AwsProvider
| AzureProvider
| GcpProvider
| GithubProvider
@@ -191,8 +186,9 @@ def initialize_prowler_provider(
mutelist_processor (Processor): The mutelist processor object containing the mutelist configuration.
Returns:
AlibabacloudProvider | AwsProvider | AzureProvider | GcpProvider | GithubProvider | IacProvider | KubernetesProvider | M365Provider | MongodbatlasProvider | OraclecloudProvider: An instance of the corresponding provider class
initialized with the provider's secrets.
AwsProvider | AzureProvider | GcpProvider | GithubProvider | IacProvider | KubernetesProvider | M365Provider | OraclecloudProvider | MongodbatlasProvider: An instance of the corresponding provider class
(`AwsProvider`, `AzureProvider`, `GcpProvider`, `GithubProvider`, `IacProvider`, `KubernetesProvider`, `M365Provider`, `OraclecloudProvider` or `MongodbatlasProvider`) initialized with the
provider's secrets.
"""
prowler_provider = return_prowler_provider(provider)
prowler_provider_kwargs = get_prowler_provider_kwargs(provider, mutelist_processor)
@@ -304,48 +304,6 @@ from rest_framework_json_api import serializers
},
"required": ["atlas_public_key", "atlas_private_key"],
},
{
"type": "object",
"title": "Alibaba Cloud Static Credentials",
"properties": {
"access_key_id": {
"type": "string",
"description": "The Alibaba Cloud access key ID for authentication.",
},
"access_key_secret": {
"type": "string",
"description": "The Alibaba Cloud access key secret for authentication.",
},
"security_token": {
"type": "string",
"description": "The STS security token for temporary credentials (optional).",
},
},
"required": ["access_key_id", "access_key_secret"],
},
{
"type": "object",
"title": "Alibaba Cloud RAM Role Assumption",
"properties": {
"role_arn": {
"type": "string",
"description": "The ARN of the RAM role to assume (e.g., acs:ram::1234567890123456:role/ProwlerRole).",
},
"access_key_id": {
"type": "string",
"description": "The Alibaba Cloud access key ID of the RAM user that will assume the role.",
},
"access_key_secret": {
"type": "string",
"description": "The Alibaba Cloud access key secret of the RAM user that will assume the role.",
},
"role_session_name": {
"type": "string",
"description": "An identifier for the role session (optional, defaults to 'ProwlerSession').",
},
},
"required": ["role_arn", "access_key_id", "access_key_secret"],
},
]
}
)
+1 -40
View File
@@ -1390,23 +1390,12 @@ class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
serializer = OracleCloudProviderSecret(data=secret)
elif provider_type == Provider.ProviderChoices.MONGODBATLAS.value:
serializer = MongoDBAtlasProviderSecret(data=secret)
elif provider_type == Provider.ProviderChoices.ALIBABACLOUD.value:
serializer = AlibabaCloudProviderSecret(data=secret)
else:
raise serializers.ValidationError(
{"provider": f"Provider type not supported {provider_type}"}
)
elif secret_type == ProviderSecret.TypeChoices.ROLE:
if provider_type == Provider.ProviderChoices.AWS.value:
serializer = AWSRoleAssumptionProviderSecret(data=secret)
elif provider_type == Provider.ProviderChoices.ALIBABACLOUD.value:
serializer = AlibabaCloudRoleAssumptionProviderSecret(data=secret)
else:
raise serializers.ValidationError(
{
"secret_type": f"Role assumption not supported for provider type: {provider_type}"
}
)
serializer = AWSRoleAssumptionProviderSecret(data=secret)
elif secret_type == ProviderSecret.TypeChoices.SERVICE_ACCOUNT:
serializer = GCPServiceAccountProviderSecret(data=secret)
else:
@@ -1543,34 +1532,6 @@ class OracleCloudProviderSecret(serializers.Serializer):
resource_name = "provider-secrets"
class AlibabaCloudProviderSecret(serializers.Serializer):
access_key_id = serializers.CharField()
access_key_secret = serializers.CharField()
security_token = serializers.CharField(required=False)
class Meta:
resource_name = "provider-secrets"
class AlibabaCloudRoleAssumptionProviderSecret(serializers.Serializer):
role_arn = serializers.CharField(
help_text="Access Key ID of the RAM user that will assume the role"
)
access_key_id = serializers.CharField(
help_text="Access Key ID of the RAM user that will assume the role"
)
access_key_secret = serializers.CharField(
help_text="Access Key Secret of the RAM user that will assume the role"
)
role_session_name = serializers.CharField(
required=False,
help_text="Session name for the assumed role session (optional, defaults to 'ProwlerSession')",
)
class Meta:
resource_name = "provider-secrets"
class AWSRoleAssumptionProviderSecret(serializers.Serializer):
role_arn = serializers.CharField()
external_id = serializers.CharField()
+1 -1
View File
@@ -359,7 +359,7 @@ class SchemaView(SpectacularAPIView):
def get(self, request, *args, **kwargs):
spectacular_settings.TITLE = "Prowler API"
spectacular_settings.VERSION = "1.18.0"
spectacular_settings.VERSION = "1.17.0"
spectacular_settings.DESCRIPTION = (
"Prowler API specification.\n\nThis file is auto-generated."
)
-7
View File
@@ -517,12 +517,6 @@ def providers_fixture(tenants_fixture):
alias="mongodbatlas_testing",
tenant_id=tenant.id,
)
provider9 = Provider.objects.create(
provider="alibabacloud",
uid="1234567890123456",
alias="alibabacloud_testing",
tenant_id=tenant.id,
)
return (
provider1,
@@ -533,7 +527,6 @@ def providers_fixture(tenants_fixture):
provider6,
provider7,
provider8,
provider9,
)
-11
View File
@@ -27,7 +27,6 @@ from prowler.lib.outputs.compliance.c5.c5_gcp import GCPC5
from prowler.lib.outputs.compliance.ccc.ccc_aws import CCC_AWS
from prowler.lib.outputs.compliance.ccc.ccc_azure import CCC_Azure
from prowler.lib.outputs.compliance.ccc.ccc_gcp import CCC_GCP
from prowler.lib.outputs.compliance.cis.cis_alibabacloud import AlibabaCloudCIS
from prowler.lib.outputs.compliance.cis.cis_aws import AWSCIS
from prowler.lib.outputs.compliance.cis.cis_azure import AzureCIS
from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
@@ -51,9 +50,6 @@ from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_azure import (
AzureMitreAttack,
)
from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_gcp import GCPMitreAttack
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_alibaba import (
ProwlerThreatScoreAlibaba,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_aws import (
ProwlerThreatScoreAWS,
)
@@ -132,13 +128,6 @@ COMPLIANCE_CLASS_MAP = {
"oraclecloud": [
(lambda name: name.startswith("cis_"), OracleCloudCIS),
],
"alibabacloud": [
(lambda name: name.startswith("cis_"), AlibabaCloudCIS),
(
lambda name: name == "prowler_threatscore_alibabacloud",
ProwlerThreatScoreAlibaba,
),
],
}
+15 -16
View File
@@ -19,9 +19,6 @@ from prowler.providers.aws.aws_provider import AwsProvider
from prowler.providers.aws.lib.s3.s3 import S3
from prowler.providers.aws.lib.security_hub.security_hub import SecurityHub
from prowler.providers.common.models import Connection
from prowler.providers.aws.lib.security_hub.exceptions.exceptions import (
SecurityHubNoEnabledRegionsError,
)
logger = get_task_logger(__name__)
@@ -225,9 +222,8 @@ def get_security_hub_client_from_integration(
)
return True, security_hub
else:
# Reset regions information if connection fails and integration is not connected
# Reset regions information if connection fails
with rls_transaction(tenant_id, using=MainRouter.default_db):
integration.connected = False
integration.configuration["regions"] = {}
integration.save()
@@ -334,18 +330,15 @@ def upload_security_hub_integration(
)
if not connected:
if isinstance(
security_hub.error,
SecurityHubNoEnabledRegionsError,
logger.error(
f"Security Hub connection failed for integration {integration.id}: "
f"{security_hub.error}"
)
with rls_transaction(
tenant_id, using=MainRouter.default_db
):
logger.warning(
f"Security Hub integration {integration.id} has no enabled regions"
)
else:
logger.error(
f"Security Hub connection failed for integration {integration.id}: "
f"{security_hub.error}"
)
integration.connected = False
integration.save()
break # Skip this integration
security_hub_client = security_hub
@@ -416,16 +409,22 @@ def upload_security_hub_integration(
logger.warning(
f"Failed to archive previous findings: {str(archive_error)}"
)
except Exception as e:
logger.error(
f"Security Hub integration {integration.id} failed: {str(e)}"
)
continue
result = integration_executions == len(integrations)
if result:
logger.info(
f"All Security Hub integrations completed successfully for provider {provider_id}"
)
else:
logger.error(
f"Some Security Hub integrations failed for provider {provider_id}"
)
return result
+9 -56
View File
@@ -243,28 +243,15 @@ def _safe_getattr(obj, attr: str, default: str = "N/A") -> str:
def _create_info_table_style() -> TableStyle:
"""Create a reusable table style for information/metadata tables.
ReportLab TableStyle coordinate system:
- Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
- Coordinates use (column, row) format, starting at (0, 0) for top-left cell
- Negative indices work like Python slicing: -1 means "last row/column"
- (0, 0) to (0, -1) = entire first column (all rows)
- (0, 0) to (-1, 0) = entire first row (all columns)
- (0, 0) to (-1, -1) = entire table
- Styles are applied in order; later rules override earlier ones
"""
"""Create a reusable table style for information/metadata tables."""
return TableStyle(
[
# Column 0 (labels): blue background with white text
("BACKGROUND", (0, 0), (0, -1), COLOR_BLUE),
("TEXTCOLOR", (0, 0), (0, -1), COLOR_WHITE),
("FONTNAME", (0, 0), (0, -1), "FiraCode"),
# Column 1 (values): light blue background with gray text
("BACKGROUND", (1, 0), (1, -1), COLOR_BG_BLUE),
("TEXTCOLOR", (1, 0), (1, -1), COLOR_GRAY),
("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
# Apply to entire table
("ALIGN", (0, 0), (-1, -1), "LEFT"),
("VALIGN", (0, 0), (-1, -1), "TOP"),
("FONTSIZE", (0, 0), (-1, -1), 11),
@@ -278,30 +265,19 @@ def _create_info_table_style() -> TableStyle:
def _create_header_table_style(header_color: colors.Color = None) -> TableStyle:
"""Create a reusable table style for tables with headers.
ReportLab TableStyle coordinate system:
- Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
- (0, 0) to (-1, 0) = entire first row (header row)
- (1, 1) to (-1, -1) = all data cells (excludes header row and first column)
- See _create_info_table_style() for full coordinate system documentation
"""
"""Create a reusable table style for tables with headers."""
if header_color is None:
header_color = COLOR_BLUE
return TableStyle(
[
# Header row (row 0): colored background with white text
("BACKGROUND", (0, 0), (-1, 0), header_color),
("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
("FONTSIZE", (0, 0), (-1, 0), 10),
# Apply to entire table
("ALIGN", (0, 0), (-1, -1), "CENTER"),
("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
# Data cells (excluding header): smaller font
("FONTSIZE", (1, 1), (-1, -1), 9),
# Apply to entire table
("GRID", (0, 0), (-1, -1), 1, COLOR_GRID_GRAY),
("LEFTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
("RIGHTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
@@ -312,30 +288,18 @@ def _create_header_table_style(header_color: colors.Color = None) -> TableStyle:
def _create_findings_table_style() -> TableStyle:
"""Create a reusable table style for findings tables.
ReportLab TableStyle coordinate system:
- Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
- (0, 0) to (-1, 0) = entire first row (header row)
- (0, 0) to (0, 0) = only the top-left cell
- See _create_info_table_style() for full coordinate system documentation
"""
"""Create a reusable table style for findings tables."""
return TableStyle(
[
# Header row (row 0): colored background with white text
("BACKGROUND", (0, 0), (-1, 0), COLOR_BLUE),
("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
# Only top-left cell centered (for index/number column)
("ALIGN", (0, 0), (0, 0), "CENTER"),
# Apply to entire table
("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
("FONTSIZE", (0, 0), (-1, -1), 9),
("GRID", (0, 0), (-1, -1), 0.1, COLOR_BORDER_GRAY),
# Remove padding only from top-left cell
("LEFTPADDING", (0, 0), (0, 0), 0),
("RIGHTPADDING", (0, 0), (0, 0), 0),
# Apply to entire table
("TOPPADDING", (0, 0), (-1, -1), PADDING_SMALL),
("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_SMALL),
]
@@ -1139,15 +1103,11 @@ def generate_threatscore_report(
elements.append(Spacer(1, 0.5 * inch))
# Add compliance information table
provider_alias = provider_obj.alias or "N/A"
info_data = [
["Framework:", compliance_framework],
["ID:", compliance_id],
["Name:", Paragraph(compliance_name, normal_center)],
["Version:", compliance_version],
["Provider:", provider_type.upper()],
["Account ID:", provider_obj.uid],
["Alias:", provider_alias],
["Scan ID:", scan_id],
["Description:", Paragraph(compliance_description, normal_center)],
]
@@ -2099,15 +2059,12 @@ def generate_ens_report(
elements.append(Spacer(1, 0.5 * inch))
# Add compliance information table
provider_alias = provider_obj.alias or "N/A"
info_data = [
["Framework:", compliance_framework],
["ID:", compliance_id],
["Nombre:", Paragraph(compliance_name, normal_center)],
["Versión:", compliance_version],
["Proveedor:", provider_type.upper()],
["Account ID:", provider_obj.uid],
["Alias:", provider_alias],
["Scan ID:", scan_id],
["Descripción:", Paragraph(compliance_description, normal_center)],
]
@@ -2115,12 +2072,12 @@ def generate_ens_report(
info_table.setStyle(
TableStyle(
[
("BACKGROUND", (0, 0), (0, -1), colors.Color(0.2, 0.4, 0.6)),
("TEXTCOLOR", (0, 0), (0, -1), colors.white),
("FONTNAME", (0, 0), (0, -1), "FiraCode"),
("BACKGROUND", (1, 0), (1, -1), colors.Color(0.95, 0.97, 1.0)),
("TEXTCOLOR", (1, 0), (1, -1), colors.Color(0.2, 0.2, 0.2)),
("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
("BACKGROUND", (0, 0), (0, 6), colors.Color(0.2, 0.4, 0.6)),
("TEXTCOLOR", (0, 0), (0, 6), colors.white),
("FONTNAME", (0, 0), (0, 6), "FiraCode"),
("BACKGROUND", (1, 0), (1, 6), colors.Color(0.95, 0.97, 1.0)),
("TEXTCOLOR", (1, 0), (1, 6), colors.Color(0.2, 0.2, 0.2)),
("FONTNAME", (1, 0), (1, 6), "PlusJakartaSans"),
("ALIGN", (0, 0), (-1, -1), "LEFT"),
("VALIGN", (0, 0), (-1, -1), "TOP"),
("FONTSIZE", (0, 0), (-1, -1), 11),
@@ -3040,14 +2997,11 @@ def generate_nis2_report(
elements.append(Spacer(1, 0.3 * inch))
# Compliance metadata table
provider_alias = provider_obj.alias or "N/A"
metadata_data = [
["Framework:", compliance_framework],
["Name:", Paragraph(compliance_name, normal_center)],
["Version:", compliance_version or "N/A"],
["Provider:", provider_type.upper()],
["Account ID:", provider_obj.uid],
["Alias:", provider_alias],
["Scan ID:", scan_id],
["Description:", Paragraph(compliance_description, normal_center)],
]
@@ -3531,7 +3485,6 @@ def generate_compliance_reports(
"gcp",
"m365",
"kubernetes",
"alibabacloud",
]:
logger.info(
f"Provider {provider_id} ({provider_type}) is not supported for ThreatScore report"
-60
View File
@@ -61,58 +61,6 @@ from prowler.lib.outputs.finding import Finding as FindingOutput
logger = get_task_logger(__name__)
def _cleanup_orphan_scheduled_scans(
tenant_id: str,
provider_id: str,
scheduler_task_id: int,
) -> int:
"""
TEMPORARY WORKAROUND: Clean up orphan AVAILABLE scans.
Detects and removes AVAILABLE scans that were never used due to an
issue during the first scheduled scan setup.
An AVAILABLE scan is considered orphan if there's also a SCHEDULED scan for
the same provider with the same scheduler_task_id. This situation indicates
that the first scan execution didn't find the AVAILABLE scan (most likely
because it wasn't committed yet) and created a new one, leaving the AVAILABLE scan orphaned.
Args:
tenant_id: The tenant ID.
provider_id: The provider ID.
scheduler_task_id: The PeriodicTask ID that triggers these scans.
Returns:
Number of orphan scans deleted (0 if none found).
"""
orphan_available_scans = Scan.objects.filter(
tenant_id=tenant_id,
provider_id=provider_id,
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=scheduler_task_id,
)
scheduled_scan_exists = Scan.objects.filter(
tenant_id=tenant_id,
provider_id=provider_id,
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduler_task_id=scheduler_task_id,
).exists()
if scheduled_scan_exists and orphan_available_scans.exists():
orphan_count = orphan_available_scans.count()
logger.warning(
f"[WORKAROUND] Found {orphan_count} orphan AVAILABLE scan(s) for "
f"provider {provider_id} alongside a SCHEDULED scan. Cleaning up orphans..."
)
orphan_available_scans.delete()
return orphan_count
return 0
def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str):
"""
Helper function to perform tasks after a scan is completed.
@@ -299,14 +247,6 @@ def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
return serializer.data
next_scan_datetime = get_next_execution_datetime(task_id, provider_id)
# TEMPORARY WORKAROUND: Clean up orphan scans from transaction isolation issue
_cleanup_orphan_scheduled_scans(
tenant_id=tenant_id,
provider_id=provider_id,
scheduler_task_id=periodic_task_instance.id,
)
scan_instance, _ = Scan.objects.get_or_create(
tenant_id=tenant_id,
provider_id=provider_id,
@@ -1199,6 +1199,9 @@ class TestSecurityHubIntegrationUploads:
)
assert result is False
# Integration should be marked as disconnected
integration.save.assert_called_once()
assert integration.connected is False
@patch("tasks.jobs.integrations.ASFF")
@patch("tasks.jobs.integrations.FindingOutput")
-344
View File
@@ -4,13 +4,11 @@ from unittest.mock import MagicMock, patch
import openai
import pytest
from botocore.exceptions import ClientError
from django_celery_beat.models import IntervalSchedule, PeriodicTask
from tasks.jobs.lighthouse_providers import (
_create_bedrock_client,
_extract_bedrock_credentials,
)
from tasks.tasks import (
_cleanup_orphan_scheduled_scans,
_perform_scan_complete_tasks,
check_integrations_task,
check_lighthouse_provider_connection_task,
@@ -24,8 +22,6 @@ from api.models import (
Integration,
LighthouseProviderConfiguration,
LighthouseProviderModels,
Scan,
StateChoices,
)
@@ -1719,343 +1715,3 @@ class TestRefreshLighthouseProviderModelsTask:
assert result["deleted"] == 0
assert "error" in result
assert result["error"] is not None
@pytest.mark.django_db
class TestCleanupOrphanScheduledScans:
"""Unit tests for _cleanup_orphan_scheduled_scans helper function."""
def _create_periodic_task(self, provider_id, tenant_id):
"""Helper to create a PeriodicTask for testing."""
interval, _ = IntervalSchedule.objects.get_or_create(every=24, period="hours")
return PeriodicTask.objects.create(
name=f"scan-perform-scheduled-{provider_id}",
task="scan-perform-scheduled",
interval=interval,
kwargs=f'{{"tenant_id": "{tenant_id}", "provider_id": "{provider_id}"}}',
enabled=True,
)
def test_cleanup_deletes_orphan_when_both_available_and_scheduled_exist(
self, tenants_fixture, providers_fixture
):
"""Test that AVAILABLE scan is deleted when SCHEDULED also exists."""
tenant = tenants_fixture[0]
provider = providers_fixture[0]
periodic_task = self._create_periodic_task(provider.id, tenant.id)
# Create orphan AVAILABLE scan
orphan_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task.id,
)
# Create SCHEDULED scan (next execution)
scheduled_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduler_task_id=periodic_task.id,
)
# Execute cleanup
deleted_count = _cleanup_orphan_scheduled_scans(
tenant_id=str(tenant.id),
provider_id=str(provider.id),
scheduler_task_id=periodic_task.id,
)
# Verify orphan was deleted
assert deleted_count == 1
assert not Scan.objects.filter(id=orphan_scan.id).exists()
assert Scan.objects.filter(id=scheduled_scan.id).exists()
def test_cleanup_does_not_delete_when_only_available_exists(
self, tenants_fixture, providers_fixture
):
"""Test that AVAILABLE scan is NOT deleted when no SCHEDULED exists."""
tenant = tenants_fixture[0]
provider = providers_fixture[0]
periodic_task = self._create_periodic_task(provider.id, tenant.id)
# Create only AVAILABLE scan (normal first scan scenario)
available_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task.id,
)
# Execute cleanup
deleted_count = _cleanup_orphan_scheduled_scans(
tenant_id=str(tenant.id),
provider_id=str(provider.id),
scheduler_task_id=periodic_task.id,
)
# Verify nothing was deleted
assert deleted_count == 0
assert Scan.objects.filter(id=available_scan.id).exists()
def test_cleanup_does_not_delete_when_only_scheduled_exists(
self, tenants_fixture, providers_fixture
):
"""Test that nothing is deleted when only SCHEDULED exists."""
tenant = tenants_fixture[0]
provider = providers_fixture[0]
periodic_task = self._create_periodic_task(provider.id, tenant.id)
# Create only SCHEDULED scan (normal subsequent scan scenario)
scheduled_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduler_task_id=periodic_task.id,
)
# Execute cleanup
deleted_count = _cleanup_orphan_scheduled_scans(
tenant_id=str(tenant.id),
provider_id=str(provider.id),
scheduler_task_id=periodic_task.id,
)
# Verify nothing was deleted
assert deleted_count == 0
assert Scan.objects.filter(id=scheduled_scan.id).exists()
def test_cleanup_returns_zero_when_no_scans_exist(
self, tenants_fixture, providers_fixture
):
"""Test that cleanup returns 0 when no scans exist."""
tenant = tenants_fixture[0]
provider = providers_fixture[0]
periodic_task = self._create_periodic_task(provider.id, tenant.id)
# Execute cleanup with no scans
deleted_count = _cleanup_orphan_scheduled_scans(
tenant_id=str(tenant.id),
provider_id=str(provider.id),
scheduler_task_id=periodic_task.id,
)
assert deleted_count == 0
def test_cleanup_deletes_multiple_orphan_available_scans(
self, tenants_fixture, providers_fixture
):
"""Test that multiple AVAILABLE orphan scans are all deleted."""
tenant = tenants_fixture[0]
provider = providers_fixture[0]
periodic_task = self._create_periodic_task(provider.id, tenant.id)
# Create multiple orphan AVAILABLE scans
orphan_scan_1 = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task.id,
)
orphan_scan_2 = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task.id,
)
# Create SCHEDULED scan
scheduled_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduler_task_id=periodic_task.id,
)
# Execute cleanup
deleted_count = _cleanup_orphan_scheduled_scans(
tenant_id=str(tenant.id),
provider_id=str(provider.id),
scheduler_task_id=periodic_task.id,
)
# Verify all orphans were deleted
assert deleted_count == 2
assert not Scan.objects.filter(id=orphan_scan_1.id).exists()
assert not Scan.objects.filter(id=orphan_scan_2.id).exists()
assert Scan.objects.filter(id=scheduled_scan.id).exists()
def test_cleanup_does_not_affect_different_provider(
self, tenants_fixture, providers_fixture
):
"""Test that cleanup only affects scans for the specified provider."""
tenant = tenants_fixture[0]
provider1 = providers_fixture[0]
provider2 = providers_fixture[1]
periodic_task1 = self._create_periodic_task(provider1.id, tenant.id)
periodic_task2 = self._create_periodic_task(provider2.id, tenant.id)
# Create orphan scenario for provider1
orphan_scan_p1 = Scan.objects.create(
tenant_id=tenant.id,
provider=provider1,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task1.id,
)
scheduled_scan_p1 = Scan.objects.create(
tenant_id=tenant.id,
provider=provider1,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduler_task_id=periodic_task1.id,
)
# Create AVAILABLE scan for provider2 (should not be affected)
available_scan_p2 = Scan.objects.create(
tenant_id=tenant.id,
provider=provider2,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task2.id,
)
# Execute cleanup for provider1 only
deleted_count = _cleanup_orphan_scheduled_scans(
tenant_id=str(tenant.id),
provider_id=str(provider1.id),
scheduler_task_id=periodic_task1.id,
)
# Verify only provider1's orphan was deleted
assert deleted_count == 1
assert not Scan.objects.filter(id=orphan_scan_p1.id).exists()
assert Scan.objects.filter(id=scheduled_scan_p1.id).exists()
assert Scan.objects.filter(id=available_scan_p2.id).exists()
def test_cleanup_does_not_affect_manual_scans(
self, tenants_fixture, providers_fixture
):
"""Test that cleanup only affects SCHEDULED trigger scans, not MANUAL."""
tenant = tenants_fixture[0]
provider = providers_fixture[0]
periodic_task = self._create_periodic_task(provider.id, tenant.id)
# Create orphan AVAILABLE scheduled scan
orphan_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task.id,
)
# Create SCHEDULED scan
scheduled_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduler_task_id=periodic_task.id,
)
# Create AVAILABLE manual scan (should not be affected)
manual_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Manual scan",
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.AVAILABLE,
)
# Execute cleanup
deleted_count = _cleanup_orphan_scheduled_scans(
tenant_id=str(tenant.id),
provider_id=str(provider.id),
scheduler_task_id=periodic_task.id,
)
# Verify only scheduled orphan was deleted
assert deleted_count == 1
assert not Scan.objects.filter(id=orphan_scan.id).exists()
assert Scan.objects.filter(id=scheduled_scan.id).exists()
assert Scan.objects.filter(id=manual_scan.id).exists()
def test_cleanup_does_not_affect_different_scheduler_task(
self, tenants_fixture, providers_fixture
):
"""Test that cleanup only affects scans with the specified scheduler_task_id."""
tenant = tenants_fixture[0]
provider = providers_fixture[0]
periodic_task1 = self._create_periodic_task(provider.id, tenant.id)
# Create another periodic task
interval, _ = IntervalSchedule.objects.get_or_create(every=24, period="hours")
periodic_task2 = PeriodicTask.objects.create(
name=f"scan-perform-scheduled-other-{provider.id}",
task="scan-perform-scheduled",
interval=interval,
kwargs=f'{{"tenant_id": "{tenant.id}", "provider_id": "{provider.id}"}}',
enabled=True,
)
# Create orphan scenario for periodic_task1
orphan_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task1.id,
)
scheduled_scan = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduler_task_id=periodic_task1.id,
)
# Create AVAILABLE scan for periodic_task2 (should not be affected)
available_scan_other_task = Scan.objects.create(
tenant_id=tenant.id,
provider=provider,
name="Daily scheduled scan",
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduler_task_id=periodic_task2.id,
)
# Execute cleanup for periodic_task1 only
deleted_count = _cleanup_orphan_scheduled_scans(
tenant_id=str(tenant.id),
provider_id=str(provider.id),
scheduler_task_id=periodic_task1.id,
)
# Verify only periodic_task1's orphan was deleted
assert deleted_count == 1
assert not Scan.objects.filter(id=orphan_scan.id).exists()
assert Scan.objects.filter(id=scheduled_scan.id).exists()
assert Scan.objects.filter(id=available_scan_other_task.id).exists()
@@ -1,28 +0,0 @@
import warnings
from dashboard.common_methods import get_section_containers_threatscore
warnings.filterwarnings("ignore")
def get_table(data):
aux = data[
[
"REQUIREMENTS_ID",
"REQUIREMENTS_DESCRIPTION",
"REQUIREMENTS_ATTRIBUTES_SECTION",
"REQUIREMENTS_ATTRIBUTES_SUBSECTION",
"CHECKID",
"STATUS",
"REGION",
"ACCOUNTID",
"RESOURCEID",
]
].copy()
return get_section_containers_threatscore(
aux,
"REQUIREMENTS_ATTRIBUTES_SECTION",
"REQUIREMENTS_ATTRIBUTES_SUBSECTION",
"REQUIREMENTS_ID",
)
+8 -46
View File
@@ -407,11 +407,9 @@ def display_data(
compliance_module = importlib.import_module(
f"dashboard.compliance.{current}"
)
# Build subset list based on available columns
dedup_columns = ["CHECKID", "STATUS", "RESOURCEID", "STATUSEXTENDED"]
if "MUTED" in data.columns:
dedup_columns.insert(2, "MUTED")
data = data.drop_duplicates(subset=dedup_columns)
data = data.drop_duplicates(
subset=["CHECKID", "STATUS", "MUTED", "RESOURCEID", "STATUSEXTENDED"]
)
if "threatscore" in analytics_input:
data = get_threatscore_mean_by_pillar(data)
@@ -654,7 +652,6 @@ def get_table(current_compliance, table):
def get_threatscore_mean_by_pillar(df):
score_per_pillar = {}
max_score_per_pillar = {}
counted_findings_per_pillar = {}
for _, row in df.iterrows():
pillar = (
@@ -666,18 +663,6 @@ def get_threatscore_mean_by_pillar(df):
if pillar not in score_per_pillar:
score_per_pillar[pillar] = 0
max_score_per_pillar[pillar] = 0
counted_findings_per_pillar[pillar] = set()
# Skip muted findings for score calculation
is_muted = "MUTED" in df.columns and row.get("MUTED") == "True"
if is_muted:
continue
# Create unique finding identifier to avoid counting duplicates
finding_id = f"{row.get('CHECKID', '')}_{row.get('RESOURCEID', '')}"
if finding_id in counted_findings_per_pillar[pillar]:
continue
counted_findings_per_pillar[pillar].add(finding_id)
level_of_risk = pd.to_numeric(
row["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
@@ -721,10 +706,6 @@ def get_table_prowler_threatscore(df):
score_per_pillar = {}
max_score_per_pillar = {}
pillars = {}
counted_findings_per_pillar = {}
counted_pass = set()
counted_fail = set()
counted_muted = set()
df_copy = df.copy()
@@ -739,24 +720,6 @@ def get_table_prowler_threatscore(df):
pillars[pillar] = {"FAIL": 0, "PASS": 0, "MUTED": 0}
score_per_pillar[pillar] = 0
max_score_per_pillar[pillar] = 0
counted_findings_per_pillar[pillar] = set()
# Create unique finding identifier
finding_id = f"{row.get('CHECKID', '')}_{row.get('RESOURCEID', '')}"
# Check if muted
is_muted = "MUTED" in df_copy.columns and row.get("MUTED") == "True"
# Count muted findings (separate from score calculation)
if is_muted and finding_id not in counted_muted:
counted_muted.add(finding_id)
pillars[pillar]["MUTED"] += 1
continue # Skip muted findings for score calculation
# Skip if already counted for this pillar
if finding_id in counted_findings_per_pillar[pillar]:
continue
counted_findings_per_pillar[pillar].add(finding_id)
level_of_risk = pd.to_numeric(
row["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
@@ -775,14 +738,13 @@ def get_table_prowler_threatscore(df):
max_score_per_pillar[pillar] += level_of_risk * weight
if row["STATUS"] == "PASS":
if finding_id not in counted_pass:
counted_pass.add(finding_id)
pillars[pillar]["PASS"] += 1
pillars[pillar]["PASS"] += 1
score_per_pillar[pillar] += level_of_risk * weight
elif row["STATUS"] == "FAIL":
if finding_id not in counted_fail:
counted_fail.add(finding_id)
pillars[pillar]["FAIL"] += 1
pillars[pillar]["FAIL"] += 1
if "MUTED" in row and row["MUTED"] == "True":
pillars[pillar]["MUTED"] += 1
result_df = []
+1 -34
View File
@@ -41,9 +41,6 @@ services:
volumes:
- "./ui:/app"
- "/app/node_modules"
depends_on:
mcp-server:
condition: service_healthy
postgres:
image: postgres:16.3-alpine3.20
@@ -60,11 +57,7 @@ services:
ports:
- "${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}"
healthcheck:
test:
[
"CMD-SHELL",
"sh -c 'pg_isready -U ${POSTGRES_ADMIN_USER} -d ${POSTGRES_DB}'",
]
test: ["CMD-SHELL", "sh -c 'pg_isready -U ${POSTGRES_ADMIN_USER} -d ${POSTGRES_DB}'"]
interval: 5s
timeout: 5s
retries: 5
@@ -125,32 +118,6 @@ services:
- "../docker-entrypoint.sh"
- "beat"
mcp-server:
build:
context: ./mcp_server
dockerfile: Dockerfile
environment:
- PROWLER_MCP_TRANSPORT_MODE=http
env_file:
- path: .env
required: false
ports:
- "8000:8000"
volumes:
- ./mcp_server/prowler_mcp_server:/app/prowler_mcp_server
- ./mcp_server/pyproject.toml:/app/pyproject.toml
- ./mcp_server/entrypoint.sh:/app/entrypoint.sh
command: ["uvicorn", "--host", "0.0.0.0", "--port", "8000"]
healthcheck:
test:
[
"CMD-SHELL",
"wget -q -O /dev/null http://127.0.0.1:8000/health || exit 1",
]
interval: 10s
timeout: 5s
retries: 3
volumes:
outputs:
driver: local
-25
View File
@@ -1,9 +1,3 @@
# Production Docker Compose configuration
# Uses pre-built images from Docker Hub (prowlercloud/*)
#
# For development with local builds and hot-reload, use docker-compose-dev.yml instead:
# docker compose -f docker-compose-dev.yml up
#
services:
api:
hostname: "prowler-api"
@@ -32,9 +26,6 @@ services:
required: false
ports:
- ${UI_PORT:-3000}:${UI_PORT:-3000}
depends_on:
mcp-server:
condition: service_healthy
postgres:
image: postgres:16.3-alpine3.20
@@ -102,22 +93,6 @@ services:
- "../docker-entrypoint.sh"
- "beat"
mcp-server:
image: prowlercloud/prowler-mcp:${PROWLER_MCP_VERSION:-stable}
environment:
- PROWLER_MCP_TRANSPORT_MODE=http
env_file:
- path: .env
required: false
ports:
- "8000:8000"
command: ["uvicorn", "--host", "0.0.0.0", "--port", "8000"]
healthcheck:
test: ["CMD-SHELL", "wget -q -O /dev/null http://127.0.0.1:8000/health || exit 1"]
interval: 10s
timeout: 5s
retries: 3
volumes:
output:
driver: local
+1 -2
View File
@@ -312,8 +312,7 @@ The type of resource being audited. This field helps categorize and organize fin
- **Azure**: Use types from [Azure Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/reference/supported-tables-resources), for example: `Microsoft.Storage/storageAccounts`.
- **Google Cloud**: Use [Cloud Asset Inventory asset types](https://cloud.google.com/asset-inventory/docs/asset-types), for example: `compute.googleapis.com/Instance`.
- **Kubernetes**: Use types shown under `KIND` from `kubectl api-resources`.
- **Oracle Cloud Infrastructure**: Use types from [Oracle Cloud Infrastructure documentation](https://docs.public.oneportal.content.oci.oraclecloud.com/en-us/iaas/Content/Search/Tasks/queryingresources_topic-Listing_Supported_Resource_Types.htm).
- **M365 / GitHub / MongoDB Atlas**: Leave empty due to lack of standardized types.
- **M365 / GitHub**: Leave empty due to lack of standardized types.
#### Description
-327
View File
@@ -1,327 +0,0 @@
---
title: 'End-2-End Tests for Prowler App'
---
End-to-end (E2E) tests validate complete user flows in Prowler App (UI + API). These tests are implemented with [Playwright](https://playwright.dev/) under the `ui/tests` folder and are designed to run against a Prowler App environment.
## General Recommendations
When adding or maintaining E2E tests for Prowler App, follow these guidelines:
1. **Test real user journeys**
Focus on full workflows (for example, sign-up → login → add provider → launch scan) instead of low-level UI details already covered by unit or integration tests.
2. **Group tests by entity or feature area**
- Organize E2E tests by entity or feature area (for example, `providers.spec.ts`, `scans.spec.ts`, `invitations.spec.ts`, `sign-up.spec.ts`).
- Each entity should have its own test file and corresponding page model class (for example, `ProvidersPage`, `ScansPage`, `InvitationsPage`).
- Related tests for the same entity should be grouped together in the same test file to improve maintainability and make it easier to find and update tests for a specific feature.
3. **Use a Page Model (Page Object Model)**
- Encapsulate selectors and common actions in page classes instead of repeating them in each test.
- Leverage and extend the existing Playwright page models in `ui/tests`—such as `ProvidersPage`, `ScansPage`, and others—which are all based on the shared `BasePage`.
- Page models for Prowler App pages should be placed in their respective entity folders (for example, `ui/tests/providers/providers-page.ts`).
- Page models for external pages (not part of Prowler App) should be grouped in the `external` folder (for example, `ui/tests/external/github-page.ts`).
- This approach improves readability, reduces duplication, and makes refactors safer.
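**Example:** a minimal page model sketch. It assumes the shared `BasePage` keeps a reference to the Playwright `page`; the route, selectors, and method names below are illustrative, not the actual `ProvidersPage` API:
```typescript
// ui/tests/providers/providers-page.ts (illustrative sketch)
import { expect, type Page } from "@playwright/test";
import { BasePage } from "../base-page"; // import path assumed

export class ProvidersPage extends BasePage {
  constructor(page: Page) {
    super(page); // assumes BasePage stores the Playwright page instance
  }

  // Navigation lives in the page model, so tests never hard-code routes
  async goto() {
    await this.page.goto("/providers");
  }

  // Selectors and multi-step actions are encapsulated here instead of
  // being repeated in every test file
  async addAwsProvider(accountId: string, alias: string) {
    await this.page.getByRole("button", { name: "Add Cloud Provider" }).click();
    await this.page.getByLabel("Account ID").fill(accountId);
    await this.page.getByLabel("Alias").fill(alias);
    await this.page.getByRole("button", { name: "Save" }).click();
  }

  async expectProviderListed(alias: string) {
    await expect(this.page.getByRole("row", { name: alias })).toBeVisible();
  }
}
```
A test can then express user intent directly (for example, `await providersPage.addAwsProvider(accountId, alias)`), and a selector change only needs to be updated in one place.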
4. **Reuse authentication states (StorageState)**
- Multiple authentication setup projects are available that generate pre-authenticated state files stored in `playwright/.auth/`. Each project requires specific environment variables:
- `admin.auth.setup`: Admin users with full system permissions (requires `E2E_ADMIN_USER` / `E2E_ADMIN_PASSWORD`)
- `manage-scans.auth.setup`: Users with scan management permissions (requires `E2E_MANAGE_SCANS_USER` / `E2E_MANAGE_SCANS_PASSWORD`)
- `manage-integrations.auth.setup`: Users with integration management permissions (requires `E2E_MANAGE_INTEGRATIONS_USER` / `E2E_MANAGE_INTEGRATIONS_PASSWORD`)
- `manage-account.auth.setup`: Users with account management permissions (requires `E2E_MANAGE_ACCOUNT_USER` / `E2E_MANAGE_ACCOUNT_PASSWORD`)
- `manage-cloud-providers.auth.setup`: Users with cloud provider management permissions (requires `E2E_MANAGE_CLOUD_PROVIDERS_USER` / `E2E_MANAGE_CLOUD_PROVIDERS_PASSWORD`)
- `unlimited-visibility.auth.setup`: Users with unlimited visibility permissions (requires `E2E_UNLIMITED_VISIBILITY_USER` / `E2E_UNLIMITED_VISIBILITY_PASSWORD`)
- `invite-and-manage-users.auth.setup`: Users with user invitation and management permissions (requires `E2E_INVITE_AND_MANAGE_USERS_USER` / `E2E_INVITE_AND_MANAGE_USERS_PASSWORD`)
<Note>
If fixtures have been applied (fixtures are used to populate the database with initial development data), you can use the user `e2e@prowler.com` with password `Thisisapassword123@` to configure the Admin credentials by setting `E2E_ADMIN_USER=e2e@prowler.com` and `E2E_ADMIN_PASSWORD=Thisisapassword123@`.
</Note>
- Within test files, use `test.use({ storageState: "playwright/.auth/admin_user.json" })` to load the pre-authenticated state, avoiding redundant authentication steps in each test. This must be placed at the test level (not inside the test function) to apply the authentication state to all tests in that scope. This approach is preferred over declaring dependencies in `playwright.config.ts` because it provides more control over which authentication states are used in specific tests.
**Example:**
```typescript
// Use admin authentication state for all tests in this scope
test.use({ storageState: "playwright/.auth/admin_user.json" });
test("should perform admin action", async ({ page }) => {
// Test implementation
});
```
5. **Tag and document scenarios**
- Follow the existing naming convention for suites and test cases (for example, `SCANS-E2E-001`, `PROVIDER-E2E-003`) and use tags such as `@e2e`, `@serial`, and feature tags (for example, `@providers`, `@scans`, `@aws`) to filter and organize tests.
**Example:**
```typescript
test(
"should add a new AWS provider with static credentials",
{
tag: [
"@critical",
"@e2e",
"@providers",
"@aws",
"@serial",
"@PROVIDER-E2E-001",
],
},
async ({ page }) => {
// Test implementation
}
);
```
- Document each scenario in the Markdown files under `ui/tests`, including **Priority**, **Tags**, **Description**, **Preconditions**, **Flow steps**, **Expected results**, **Key verification points**, and **Notes**.
**Example:**
```Markdown
## Test Case: `SCANS-E2E-001` - Execute On-Demand Scan
**Priority:** `critical`
**Tags:**
- type → @e2e, @serial
- feature → @scans
**Description/Objective:** Validates the complete flow to execute an on-demand scan selecting a provider by UID and confirming success on the Scans page.
**Preconditions:**
- Admin user authentication required (admin.auth.setup project)
- Environment variables configured for: E2E_AWS_PROVIDER_ACCOUNT_ID, E2E_AWS_PROVIDER_ACCESS_KEY, and E2E_AWS_PROVIDER_SECRET_KEY
- Remove any existing AWS provider with the same Account ID before starting the test
- This test must be run serially and never in parallel with other tests, as it requires the provider with the configured Account ID to be already registered.
### Flow Steps:
1. Navigate to Scans page
2. Open provider selector and choose the entry whose text contains E2E_AWS_PROVIDER_ACCOUNT_ID
3. Optionally fill scan label (alias)
4. Click "Start now" to launch the scan
5. Verify the success toast appears
6. Verify a row in the Scans table contains the provided scan label (or shows the new scan entry)
### Expected Result:
- Scan is launched successfully
- Success toast is displayed to the user
- Scans table displays the new scan entry (including the alias when provided)
### Key verification points:
- Scans page loads correctly
- Provider selector is available and lists the configured provider UID
- "Start now" button is rendered and enabled when the form is valid
- Success toast message: "The scan was launched successfully."
- Table contains a row with the scan label or new scan state (queued/available/executing)
### Notes:
- The table may take a short time to reflect the new scan; assertions look for a row containing the alias.
- Provider cleanup performed before each test to ensure clean state
- Tests should run serially to avoid state conflicts.
```
6. **Use environment variables for secrets and dynamic data**
Credentials, provider identifiers, secrets, and tokens must come from environment variables (for example, `E2E_AWS_PROVIDER_ACCOUNT_ID`, `E2E_AWS_PROVIDER_ACCESS_KEY`, `E2E_AWS_PROVIDER_SECRET_KEY`, `E2E_GCP_PROJECT_ID`). A minimal runtime check is sketched after the warning below.
<Warning>
Never commit real secrets, tokens, or account IDs to the repository.
</Warning>
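Suites can fail fast when a mandatory variable is missing (each suite performs such checks at runtime, as noted under Prerequisites). The `requireEnv` helper below is a hypothetical sketch of that pattern, not the repository's actual implementation:
```typescript
// Hypothetical helper: read a required E2E variable or fail with a clear error.
function requireEnv(name: string): string {
  const value = process.env[name];
  if (!value) {
    throw new Error(`Missing required environment variable: ${name}`);
  }
  return value;
}

// Credentials come from the environment at runtime, never from the repository.
const awsAccountId = requireEnv("E2E_AWS_PROVIDER_ACCOUNT_ID");
const awsAccessKey = requireEnv("E2E_AWS_PROVIDER_ACCESS_KEY");
const awsSecretKey = requireEnv("E2E_AWS_PROVIDER_SECRET_KEY");
```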
7. **Keep tests deterministic and isolated**
- Use Playwright's `test.beforeEach()` and `test.afterEach()` hooks to manage test state:
- **`test.beforeEach()`**: Execute cleanup or setup logic before each test runs (for example, delete existing providers with a specific account ID to ensure a clean state).
- **`test.afterEach()`**: Execute cleanup logic after each test completes (for example, remove test data created during the test execution to prevent interference with subsequent tests).
- Define tests as serial using `test.describe.serial()` when they share state or resources that could interfere with parallel execution (for example, tests that use the same provider account ID or create dependent resources). This ensures tests within the serial group run sequentially, preventing race conditions and data conflicts.
- Use unique identifiers (for example, random suffixes for emails or labels) to prevent data collisions. A combined sketch of these patterns is shown below.
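The following sketch combines these hooks; the suite name and the commented-out cleanup helper are hypothetical, not the repository's actual code:
```typescript
import { test } from "@playwright/test";

// Serial group: these tests share the same provider account ID,
// so they must never run in parallel.
test.describe.serial("AWS provider lifecycle", () => {
  test.beforeEach(async () => {
    // Setup/cleanup before each test, e.g. delete any provider left over
    // from a previous run (hypothetical helper):
    // await providersApi.deleteProviderByAccountId(accountId);
  });

  test.afterEach(async () => {
    // Remove data created during the test so subsequent tests start clean.
  });

  test("creates a provider with a unique alias", async () => {
    // Unique suffix prevents data collisions across re-runs.
    const alias = `e2e-provider-${Date.now()}`;
    // ... test implementation using `alias` ...
  });
});
```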
8. **Use explicit waiting strategies**
- Avoid using `waitForLoadState('networkidle')` as it is unreliable and can lead to flaky tests or unnecessary delays.
- Leverage Playwright's auto-waiting capabilities by waiting for specific elements to be actionable (for example, `locator.click()`, `locator.fill()`, `locator.waitFor()`).
- **Prioritize selector strategies**: Prefer `page.getByRole()` over other approaches like `page.getByText()`. `getByRole()` is more resilient to UI changes, aligns with accessibility best practices, and better reflects how users interact with the application (by role and accessible name rather than implementation details).
- For dynamic content, wait for specific UI elements that indicate the page is ready (for example, a button becoming enabled or specific text appearing).
- This approach makes tests more reliable, faster, and aligned with how users actually interact with the application.
**Common waiting patterns used in Prowler E2E tests:**
- **Element visibility assertions**: Use `expect(locator).toBeVisible()` or `expect(locator).not.toBeVisible()` to wait for elements to appear or disappear (Playwright automatically waits for these conditions).
- **URL changes**: Use `expect(page).toHaveURL(url)` or `page.waitForURL(url)` to wait for navigation to complete.
- **Element states**: Use `locator.waitFor({ state: "visible" })` or `locator.waitFor({ state: "hidden" })` when you need explicit state control.
- **Text content**: Use `expect(locator).toHaveText(text)` or `expect(locator).toContainText(text)` to wait for specific text to appear.
- **Element attributes**: Use `expect(locator).toHaveAttribute(name, value)` to wait for attributes like `aria-disabled="false"` indicating a button is enabled.
- **Custom conditions**: Use `page.waitForFunction(() => condition)` for complex conditions that cannot be expressed with locators (for example, checking DOM element dimensions or computed styles).
- **Retryable assertions**: Use `expect(async () => { ... }).toPass({ timeout })` for conditions that may take time to stabilize (for example, waiting for table rows to filter after a server request).
- **Scroll into view**: Use `locator.scrollIntoViewIfNeeded()` before interacting with elements that may be outside the viewport.
**Example from Prowler tests:**
```typescript
// Wait for page to load by checking main content is visible
await expect(page.locator("main")).toBeVisible();
// Wait for URL change after form submission
await expect(page).toHaveURL("/providers");
// Wait for button to become enabled
await expect(submitButton).toHaveAttribute("aria-disabled", "false");
// Wait for loading spinner to disappear
await expect(page.getByText("Loading")).not.toBeVisible();
// Wait for custom condition
await page.waitForFunction(() => {
const main = document.querySelector("main");
return main && main.offsetHeight > 0;
});
// Wait for retryable condition (e.g., table filtering)
await expect(async () => {
const rowCount = await tableRows.count();
expect(rowCount).toBeLessThanOrEqual(1);
}).toPass({ timeout: 20000 });
```
## Running Prowler Tests
E2E tests for Prowler App run from the `ui` project using Playwright. The Playwright configuration lives in `ui/playwright.config.ts` and defines the following (a minimal sketch follows this list):
- `testDir: "./tests"`: location of E2E test files (relative to the `ui` project root, so `ui/tests`).
- `webServer`: how to start the Next.js development server and connect to Prowler API.
- `use.baseURL`: base URL for browser interactions (defaults to `http://localhost:3000` or `AUTH_URL` if set).
- `reporter: [["list"]]`: uses the list reporter to display test results in a concise format in the terminal. Other reporter options are available (for example, `html`, `json`, `junit`, `github`), and multiple reporters can be configured simultaneously. See the [Playwright reporter documentation](https://playwright.dev/docs/test-reporters) for all available options.
- `expect.timeout: 20000`: timeout for assertions (20 seconds). This is the maximum time Playwright will wait for an assertion to pass before considering it failed.
- **Test artifacts** (in `use` configuration): By default, `trace`, `screenshot`, and `video` are set to `"off"` to minimize resource usage. To review test failures or debug issues, these can be enabled in `playwright.config.ts` by changing them to `"on"`, `"on-first-retry"`, or `"retain-on-failure"` depending on your needs.
- `outputDir: "/tmp/playwright-tests"`: directory where Playwright stores test artifacts (screenshots, videos, traces) during test execution.
- **CI-specific configuration**: The configuration uses different settings when running in CI environments (detected via `process.env.CI`):
- **Retries**: `2` retries in CI (to handle flaky tests), `0` retries locally (for faster feedback during development).
- **Workers**: `1` worker in CI (sequential execution for stability), `undefined` locally (parallel execution by default for faster test runs).
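To make the list above concrete, here is a minimal sketch of a configuration with these settings. It is illustrative only: the actual `ui/playwright.config.ts` is the source of truth, and details such as the `reuseExistingServer` condition are assumptions.
```typescript
import { defineConfig } from "@playwright/test";

export default defineConfig({
  testDir: "./tests",
  outputDir: "/tmp/playwright-tests",
  reporter: [["list"]],
  expect: { timeout: 20000 }, // assertion timeout: 20 seconds
  retries: process.env.CI ? 2 : 0, // retry flaky tests only in CI
  workers: process.env.CI ? 1 : undefined, // sequential in CI, parallel locally
  use: {
    baseURL: process.env.AUTH_URL ?? "http://localhost:3000",
    trace: "off", // switch to "retain-on-failure" or "on" when debugging
    screenshot: "off",
    video: "off",
  },
  webServer: {
    command: "pnpm run dev",
    url: "http://localhost:3000",
    reuseExistingServer: true, // assumption; reuses a UI already running locally
  },
});
```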
### Prerequisites
Before running E2E tests:
- **Install root and UI dependencies**
- Follow the [developer guide introduction](/developer-guide/introduction#getting-the-code-and-installing-all-dependencies) to clone the repository and install core dependencies.
- From the `ui` directory, install frontend dependencies:
```bash
cd ui
pnpm install
pnpm run test:e2e:install # Install Playwright browsers
```
- **Ensure Prowler API is available**
- By default, Playwright uses `NEXT_PUBLIC_API_BASE_URL=http://localhost:8080/api/v1` (configured in `playwright.config.ts`).
- Start Prowler API so it is reachable on that URL (for example, via `docker-compose-dev.yml` or the development orchestration used locally).
- If a different API URL is required, set `NEXT_PUBLIC_API_BASE_URL` accordingly before running the tests.
- **Ensure Prowler App UI is available**
- Playwright automatically starts the Next.js server through the `webServer` block in `playwright.config.ts` (`pnpm run dev` by default).
- If the UI is already running on `http://localhost:3000`, Playwright will reuse the existing server when `reuseExistingServer` is `true`.
- **Configure E2E environment variables**
- Suite-specific variables (for example, provider account IDs, credentials, and E2E user data) must be provided before running tests.
- They can be defined either:
- As exported environment variables in the shell before executing the Playwright commands, or
- In a `.env.local` or `.env` file under `ui/`, and then loaded into the shell before running tests, for example:
```bash
cd ui
set -a
source .env.local # or .env
set +a
```
- Refer to the Markdown documentation files in `ui/tests` for each E2E suite (for example, the `*.md` files that describe sign-up, providers, scans, invitations, and other flows) to see the exact list of required variables and their meaning.
- Each E2E test suite explicitly checks that its required environment variables are defined at runtime and will fail with a clear error message if any mandatory variable is missing, making misconfiguration easy to detect.
### Executing Tests
To execute E2E tests for Prowler App:
1. **Run the full E2E suite (headless)**
From the `ui` directory:
```bash
pnpm run test:e2e
```
This command runs Playwright with the configured projects.
2. **Run E2E tests with the Playwright UI runner**
```bash
pnpm run test:e2e:ui
```
This opens the Playwright test runner UI to inspect, debug, and rerun specific tests or projects.
3. **Debug E2E tests interactively**
```bash
pnpm run test:e2e:debug
```
Use this mode to step through flows, inspect selectors, and adjust timings. It runs tests in headed mode with debugging tools enabled.
4. **Run tests in headed mode without debugger**
```bash
pnpm run test:e2e:headed
```
This is useful to visually confirm flows while still running the full suite.
5. **View previous test reports**
```bash
pnpm run test:e2e:report
```
This opens the latest Playwright HTML report, including traces and screenshots when enabled.
6. **Run specific tests or subsets**
In addition to the predefined scripts, Playwright allows filtering which tests run. These examples use the Playwright CLI directly through `pnpm`:
- **By test ID (`@ID` in the test metadata or description)**
To run a single test case identified by its ID (for example, `@PROVIDER-E2E-001` or `@SCANS-E2E-001`):
```bash
pnpm playwright test --grep @PROVIDER-E2E-001
```
- **By tags**
To run all tests that share a common tag (for example, all provider E2E tests tagged with `@providers`):
```bash
pnpm playwright test --grep @providers
```
This is useful to focus on a specific feature area such as providers, scans, invitations, or sign-up.
- **By Playwright project**
To run only the tests associated with a given project defined in `playwright.config.ts` (for example, `providers` or `scans`):
```bash
pnpm playwright test --project=providers
```
Combining project and grep filters is also supported, enabling very narrow runs (for example, a single test ID within the `providers` project). For additional CLI options and combinations, see the [Playwright command line documentation](https://playwright.dev/docs/test-cli).
<Note>
For detailed flows, preconditions, and environment variable requirements per feature, always refer to the Markdown files in `ui/tests`. Those documents are the single source of truth for business expectations and validation points in each E2E suite.
</Note>
-1
View File
@@ -220,7 +220,6 @@ The function returns a JSON file containing the list of regions for the provider
"sa-east-1", "us-east-1", "us-east-2", "us-west-1", "us-west-2"
],
"aws-cn": ["cn-north-1", "cn-northwest-1"],
"aws-eusc": ["eusc-de-east-1"],
"aws-us-gov": ["us-gov-east-1", "us-gov-west-1"]
}
}
+10 -34
View File
@@ -19,9 +19,7 @@
"groups": [
{
"group": "Welcome",
"pages": [
"introduction"
]
"pages": ["introduction"]
},
{
"group": "Prowler Cloud",
@@ -51,9 +49,7 @@
},
{
"group": "Prowler Lighthouse AI",
"pages": [
"getting-started/products/prowler-lighthouse-ai"
]
"pages": ["getting-started/products/prowler-lighthouse-ai"]
},
{
"group": "Prowler MCP Server",
@@ -99,14 +95,7 @@
},
"user-guide/tutorials/prowler-app-rbac",
"user-guide/tutorials/prowler-app-api-keys",
{
"group": "Mutelist",
"expanded": true,
"pages": [
"user-guide/tutorials/prowler-app-simple-mutelist",
"user-guide/tutorials/prowler-app-mute-findings"
]
},
"user-guide/tutorials/prowler-app-mute-findings",
{
"group": "Integrations",
"expanded": true,
@@ -160,9 +149,7 @@
"user-guide/cli/tutorials/quick-inventory",
{
"group": "Tutorials",
"pages": [
"user-guide/cli/tutorials/parallel-execution"
]
"pages": ["user-guide/cli/tutorials/parallel-execution"]
}
]
},
@@ -250,9 +237,7 @@
},
{
"group": "LLM",
"pages": [
"user-guide/providers/llm/getting-started-llm"
]
"pages": ["user-guide/providers/llm/getting-started-llm"]
},
{
"group": "Oracle Cloud Infrastructure",
@@ -265,9 +250,7 @@
},
{
"group": "Compliance",
"pages": [
"user-guide/compliance/tutorials/threatscore"
]
"pages": ["user-guide/compliance/tutorials/threatscore"]
}
]
},
@@ -308,8 +291,7 @@
"group": "Testing",
"pages": [
"developer-guide/unit-testing",
"developer-guide/integration-testing",
"developer-guide/end2end-testing"
"developer-guide/integration-testing"
]
},
"developer-guide/debugging",
@@ -322,21 +304,15 @@
},
{
"tab": "Security",
"pages": [
"security"
]
"pages": ["security"]
},
{
"tab": "Contact Us",
"pages": [
"contact"
]
"pages": ["contact"]
},
{
"tab": "Troubleshooting",
"pages": [
"troubleshooting"
]
"pages": ["troubleshooting"]
},
{
"tab": "About Us",
@@ -10,7 +10,7 @@ Complete reference guide for all tools available in the Prowler MCP Server. Tool
|----------|------------|------------------------|
| Prowler Hub | 10 tools | No |
| Prowler Documentation | 2 tools | No |
| Prowler Cloud/App | 24 tools | Yes |
| Prowler Cloud/App | 22 tools | Yes |
## Tool Naming Convention
@@ -80,24 +80,16 @@ Tools for managing finding muting, including pattern-based bulk muting (mutelist
- **`prowler_app_update_mute_rule`** - Update a mute rule's name, reason, or enabled status
- **`prowler_app_delete_mute_rule`** - Delete a mute rule from the system
### Compliance Management
Tools for viewing compliance status and framework details across all cloud providers.
- **`prowler_app_get_compliance_overview`** - Get high-level compliance status across all frameworks for a specific scan or provider, including pass/fail statistics per framework
- **`prowler_app_get_compliance_framework_state_details`** - Get detailed requirement-level breakdown for a specific compliance framework, including failed requirements and associated finding IDs
## Prowler Hub Tools
Access Prowler's security check catalog and compliance frameworks. **No authentication required.**
Tools follow a **two-tier pattern**: lightweight listing for browsing + detailed retrieval for complete information.
### Check Discovery
### Check Discovery and Details
- **`prowler_hub_list_checks`** - List security checks with lightweight data (id, title, severity, provider) and advanced filtering options
- **`prowler_hub_semantic_search_checks`** - Full-text search across check metadata with lightweight results
- **`prowler_hub_get_check_details`** - Get comprehensive details for a specific check including risk, remediation guidance, and compliance mappings
- **`prowler_hub_get_checks`** - List security checks with advanced filtering options
- **`prowler_hub_get_check_filters`** - Return available filter values for checks (providers, services, severities, categories, compliances)
- **`prowler_hub_search_checks`** - Full-text search across check metadata
- **`prowler_hub_get_check_raw_metadata`** - Fetch raw check metadata in JSON format
### Check Code
@@ -106,21 +98,20 @@ Tools follow a **two-tier pattern**: lightweight listing for browsing + detailed
### Compliance Frameworks
- **`prowler_hub_list_compliances`** - List compliance frameworks with lightweight data (id, name, provider) and filtering options
- **`prowler_hub_semantic_search_compliances`** - Full-text search across compliance frameworks with lightweight results
- **`prowler_hub_get_compliance_details`** - Get comprehensive compliance details including requirements and mapped checks
- **`prowler_hub_get_compliance_frameworks`** - List and filter compliance frameworks
- **`prowler_hub_search_compliance_frameworks`** - Full-text search across compliance frameworks
### Providers Information
### Provider Information
- **`prowler_hub_list_providers`** - List Prowler official providers
- **`prowler_hub_get_provider_services`** - Get available services for a specific provider
- **`prowler_hub_list_providers`** - List Prowler official providers and their services
- **`prowler_hub_get_artifacts_count`** - Get total count of checks and frameworks in Prowler Hub
## Prowler Documentation Tools
Search and access official Prowler documentation. **No authentication required.**
- **`prowler_docs_search`** - Search the official Prowler documentation using full-text search with the `term` parameter
- **`prowler_docs_get_document`** - Retrieve the full markdown content of a specific documentation file using the path from search results
- **`prowler_docs_search`** - Search the official Prowler documentation using full-text search
- **`prowler_docs_get_document`** - Retrieve the full markdown content of a specific documentation file
## Usage Tips
@@ -115,15 +115,10 @@ To update the environment file:
Edit the `.env` file and change version values:
```env
PROWLER_UI_VERSION="5.16.0"
PROWLER_API_VERSION="5.16.0"
PROWLER_UI_VERSION="5.9.0"
PROWLER_API_VERSION="5.9.0"
```
<Note>
You can find the latest versions of Prowler App in the [Releases Github section](https://github.com/prowler-cloud/prowler/releases) or in the [Container Versions](#container-versions) section of this documentation.
</Note>
#### Option 2: Using Docker Compose Pull
```bash
@@ -6,7 +6,7 @@ title: "Overview"
**Why this matters**: Every engineer has asked, "What does this check actually do?" Prowler Hub answers that question in one place, lets you pin to a specific version, and pulls definitions into your own tools or dashboards.
![](/images/products/prowler-hub.png)
![](/images/products/prowler-hub.webp)
<Card title="Go to Prowler Hub" href="https://hub.prowler.com" />
@@ -14,4 +14,4 @@ Prowler Hub also provides a fully documented public API that you can integrate i
📚 Explore the API docs at: https://hub.prowler.com/api/docs
Whether you're customizing policies, managing compliance, or enhancing visibility, Prowler Hub is built to support your security operations.
Whether you're customizing policies, managing compliance, or enhancing visibility, Prowler Hub is built to support your security operations.
Binary file not shown.
Before
Width: | Height: | Size: 256 KiB
Binary file not shown.
After
Width: | Height: | Size: 210 KiB
@@ -93,11 +93,6 @@ The following list includes all the Azure checks with configurable variables tha
## GCP
### Configurable Checks
The following list includes all the GCP checks with configurable variables that can be changed in the configuration yaml file:
| Check Name | Value | Type |
|---------------------------------------------------------------|--------------------------------------------------|-----------------|
| `compute_instance_group_multiple_zones` | `mig_min_zones` | Integer |
## Kubernetes
@@ -553,9 +548,6 @@ gcp:
# GCP Compute Configuration
# gcp.compute_public_address_shodan
shodan_api_key: null
# gcp.compute_instance_group_multiple_zones
# Minimum number of zones a MIG should span for high availability
mig_min_zones: 2
# Kubernetes Configuration
kubernetes:
@@ -6,16 +6,15 @@ By default Prowler is able to scan the following AWS partitions:
- Commercial: `aws`
- China: `aws-cn`
- European Sovereign Cloud: `aws-eusc`
- GovCloud (US): `aws-us-gov`
<Note>
To check the available regions for each partition and service, refer to: [aws\_regions\_by\_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json)
</Note>
## Scanning AWS China, European Sovereign Cloud and GovCloud Partitions in Prowler
## Scanning AWS China and GovCloud Partitions in Prowler
When scanning the China (`aws-cn`), European Sovereign Cloud (`aws-eusc`) or GovCloud (`aws-us-gov`) partitions, ensure one of the following:
When scanning the China (`aws-cn`) or GovCloud (`aws-us-gov`) partitions, ensure one of the following:
- Your AWS credentials include a valid region within the desired partition.
@@ -84,29 +83,6 @@ To scan an account in the AWS GovCloud (US) partition (`aws-us-gov`):
<Note>
With this configuration, all partition regions will be scanned without needing the `-f/--region` flag
</Note>
### AWS European Sovereign Cloud
To scan an account in the AWS European Sovereign Cloud partition (`aws-eusc`):
- By using the `-f/--region` flag:
```
prowler aws --region eusc-de-east-1
```
- By using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
```
[default]
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
region = eusc-de-east-1
```
<Note>
With this configuration, all partition regions will be scanned without needing the `-f/--region` flag
</Note>
### AWS ISO (US \& Europe)
@@ -123,9 +99,6 @@ The AWS ISO partitions—commonly referred to as "secret partitions"—are air-g
"cn-north-1",
"cn-northwest-1"
],
"aws-eusc": [
"eusc-de-east-1"
],
"aws-us-gov": [
"us-gov-east-1",
"us-gov-west-1"
@@ -1,26 +1,20 @@
---
title: 'Advanced Mutelist (YAML)'
title: 'Mute Findings (Mutelist)'
---
import { VersionBadge } from "/snippets/version-badge.mdx"
<VersionBadge version="5.9.0" />
Prowler App allows users to mute specific findings to focus on the most critical security issues. This guide demonstrates how to use the Advanced Mutelist feature with YAML configuration for complex, pattern-based muting rules.
Prowler App allows users to mute specific findings to focus on the most critical security issues. This comprehensive guide demonstrates how to effectively use the Mutelist feature to manage and prioritize security findings.
<Note>
For muting individual findings without YAML configuration, use [Simple Mutelist](/user-guide/tutorials/prowler-app-simple-mutelist) to mute findings directly from the Findings table.
## What Is the Mutelist Feature?
</Note>
The Mutelist feature enables users to:
## What Is Advanced Mutelist?
Advanced Mutelist enables users to create powerful, pattern-based muting rules using YAML configuration:
- **Define complex muting patterns** using regular expressions
- **Mute findings by check, region, resource, or tag** across multiple accounts
- **Apply wildcards** to mute entire categories of findings
- **Create exceptions** within broad muting rules
- **Suppress specific findings** from appearing in future scans
- **Focus on critical issues** by hiding resolved or accepted risks
- **Maintain audit trails** of muted findings for compliance purposes
- **Streamline security workflows** by reducing noise from non-critical findings
## Prerequisites
@@ -34,51 +28,46 @@ Before muting findings, ensure:
Muting findings does not resolve underlying security issues. Review each finding carefully before muting to ensure it represents an acceptable risk or has been properly addressed.
</Warning>
## Step 1: Connect a Provider
## Step 1: Add a provider
To configure Advanced Mutelist:
To configure Mutelist:
1. Log into Prowler App
2. Navigate to the Providers page
2. Navigate to the providers page
![Add provider](/images/mutelist-ui-1.png)
3. Connect a provider to enable Mutelist configuration
3. Add a provider; the "Configure Muted Findings" button will then be enabled on the Providers and Scans pages
![Button enabled in providers page](/images/mutelist-ui-2.png)
![Button enabled in scans pages](/images/mutelist-ui-3.png)
## Step 2: Configure Advanced Mutelist
## Step 2: Configure Mutelist
1. Navigate to the Mutelist page from the left navigation menu
2. Select the "Advanced" tab
3. Provide a valid Mutelist configuration in `YAML` format
<Note>
The YAML format follows the same specification as Prowler CLI. See [CLI Mutelist documentation](/user-guide/cli/tutorials/mutelist) for detailed syntax reference.
</Note>
1. Open the modal by clicking the "Configure Muted Findings" button
![Open modal](/images/mutelist-ui-4.png)
1. Provide a valid Mutelist in `YAML` format. More details about Mutelist are available [here](/user-guide/cli/tutorials/mutelist)
![Valid YAML configuration](/images/mutelist-ui-5.png)
If the YAML configuration is invalid, an error message will be displayed.
![Wrong YAML configuration](/images/mutelist-ui-7.png)
![Wrong YAML configuration 2](/images/mutelist-ui-8.png)
## Step 3: Review and Update the Configuration
## Step 3: Review the Mutelist
1. Once added, the configuration can be updated or removed from the Advanced tab
1. Once added, the configuration can be removed or updated
![Remove or update configuration](/images/mutelist-ui-6.png)
## Step 4: Verify Muted Findings in Scan Results
## Step 4: Check muted findings in the scan results
1. Run a new scan
2. Navigate to the Findings page to verify muted findings
![Check muted findings](/images/mutelist-ui-9.png)
2. Check the muted findings in the scan results
![Check muted findings](/images/mutelist-ui-9.png)
<Note>
The Advanced Mutelist configuration takes effect on subsequent scans. Existing findings are not retroactively muted.
The Mutelist configuration takes effect starting with the next scan.
</Note>
## YAML Configuration Examples
## Mutelist Ready To Use Examples
Below are ready-to-use examples for different cloud providers. For detailed syntax and logic explanation, see [CLI Mutelist documentation](/user-guide/cli/tutorials/mutelist#how-the-mutelist-works).
Below are examples for different cloud providers supported by Prowler App. Learn how the mutelist works [here](/user-guide/cli/tutorials/mutelist#how-the-mutelist-works).
### AWS Provider
@@ -1,180 +0,0 @@
---
title: "Simple Mutelist"
---
import { VersionBadge } from "/snippets/version-badge.mdx";
<VersionBadge version="5.16.0" />
Prowler App provides Simple Mutelist, an intuitive way to mute findings directly from the Findings page without writing YAML configuration. This feature streamlines the muting workflow by allowing individual or bulk muting with just a few clicks.
## What Is Simple Mutelist?
Simple Mutelist enables users to:
- **Mute findings directly from the Findings table** using checkbox selection
- **Perform bulk muting** of multiple findings at once
- **Manage mute rules** through a dedicated interface
- **Toggle mute rules on and off** without deleting them
- **Edit mute rule justifications** after creation
<Note>
Simple Mutelist creates rules based on the finding's unique identifier (UID). For complex muting patterns based on checks, regions, tags, or regular expressions, use [Advanced Mutelist](/user-guide/tutorials/prowler-app-mute-findings) with YAML configuration.
</Note>
## Accessing the Mutelist Page
To access the Mutelist page:
1. Click "Mutelist" in the left navigation menu
The Mutelist page contains two tabs:
- **Simple:** Displays a table of mute rules created through Simple Mutelist
- **Advanced:** Provides YAML-based configuration for complex muting patterns
## Muting Findings from the Findings Page
### Muting Individual Findings
To mute a single finding:
1. Navigate to the Findings page
2. Locate the finding to mute
3. Click the actions menu (three dots) on the finding row
4. Select "Mute"
5. Enter a justification for muting this finding
6. Click "Confirm" to create the mute rule
### Muting Multiple Findings (Bulk Muting)
To mute multiple findings at once:
1. Navigate to the Findings page
2. Select findings using the checkboxes in the leftmost column
3. Click the floating "Mute" button that appears at the bottom of the screen
4. Enter a justification that applies to all selected findings
5. Click "Confirm" to create mute rules for all selected findings
<Note>
Findings that are already muted display a muted icon instead of a checkbox. These findings cannot be selected for bulk operations.
</Note>
## Managing Mute Rules
### Viewing Mute Rules
To view all mute rules:
1. Navigate to the Mutelist page
2. Select the "Simple" tab
3. The table displays all mute rules with the following information:
- **Finding UID:** The unique identifier of the muted finding
- **Justification:** The reason provided for muting
- **Enabled:** Whether the rule is currently active
- **Created:** When the rule was created
### Enabling and Disabling Mute Rules
To toggle a mute rule without deleting it:
1. Navigate to the Mutelist page
2. Select the "Simple" tab
3. Locate the mute rule
4. Use the toggle switch in the "Enabled" column to enable or disable the rule
<Note>
Disabled mute rules remain in the system but do not affect findings. Findings associated with disabled rules will appear as unmuted in subsequent scans.
</Note>
### Editing Mute Rules
To edit a mute rule's justification:
1. Navigate to the Mutelist page
2. Select the "Simple" tab
3. Click the actions menu (three dots) on the mute rule row
4. Select "Edit"
5. Update the justification
6. Click "Save" to apply changes
### Deleting Mute Rules
To permanently remove a mute rule:
1. Navigate to the Mutelist page
2. Select the "Simple" tab
3. Click the actions menu (three dots) on the mute rule row
4. Select "Delete"
5. Confirm the deletion
<Warning>
Deleting a mute rule is permanent. The finding will appear as unmuted in subsequent scans. To temporarily unmute a finding without losing the rule, disable the rule instead of deleting it.
</Warning>
## How Simple Mutelist Works
Simple Mutelist creates mute rules based on a finding's unique identifier (UID). When a mute rule is created:
- **Existing findings** matching the UID are immediately marked as muted
- **Historical findings** with the same UID are also muted
- **Future findings** from subsequent scans are automatically muted if they match the UID
### Uniqueness Constraint
Each finding UID can only have one mute rule. Attempting to create a duplicate mute rule for the same finding displays an error message indicating the rule already exists.
## Simple Mutelist vs. Advanced Mutelist
| Feature | Simple Mutelist | Advanced Mutelist |
| ------------------------ | ----------------------------------------- | ------------------------------------------------------ |
| **Configuration method** | Point-and-click interface | YAML configuration file |
| **Muting scope** | Individual finding UIDs | Patterns based on checks, regions, resources, and tags |
| **Regular expressions** | Not supported | Fully supported |
| **Bulk operations** | Checkbox selection in Findings table | YAML wildcards and patterns |
| **Best for** | Quick, ad-hoc muting of specific findings | Complex, policy-driven muting rules |
### When to Use Simple Mutelist
- Muting specific findings identified during review
- Quick suppression of known false positives
- Ad-hoc muting without YAML knowledge
### When to Use Advanced Mutelist
- Muting all findings for a specific check across regions
- Pattern-based muting using regular expressions
- Tag-based muting for environment-specific resources
- Complex rules with exceptions
## Best Practices
1. **Provide meaningful justifications:** Document why each finding is muted for audit trails and team communication
2. **Review muted findings regularly:** Periodically audit mute rules to ensure they remain valid
3. **Use disable instead of delete:** When temporarily unmuting findings, disable rules rather than deleting them
4. **Combine with Advanced Mutelist:** Use Simple Mutelist for specific findings and Advanced Mutelist for broad patterns
5. **Limit bulk muting:** Review findings individually when possible to ensure appropriate justification for each
## Troubleshooting
### Duplicate Rule Error
If an error indicates a mute rule already exists for a finding:
1. Navigate to the Mutelist page
2. Search for the existing rule in the Simple tab
3. Edit the existing rule's justification if needed, or
4. Delete the existing rule and create a new one
### Finding Still Appears Unmuted
If a muted finding still appears unmuted:
1. Verify the mute rule exists in the Mutelist page
2. Ensure the mute rule is enabled (toggle is on)
3. Check that the finding UID matches the mute rule
4. Wait for the next scan to see updated muting status on historical findings
+2 -7
View File
@@ -2,16 +2,11 @@
All notable changes to the **Prowler MCP Server** are documented in this file.
## [0.3.0] (Prowler v5.16.0)
### Added
- Add new MCP Server tools for Prowler Compliance Framework Management [(#9568)](https://github.com/prowler-cloud/prowler/pull/9568)
## [0.2.1] (UNRELEASED)
### Changed
- Update API base URL environment variable to include complete path [(#9542)](https://github.com/prowler-cloud/prowler/pull/9542)
- Standardize Prowler Hub and Docs tools format for AI optimization [(#9578)](https://github.com/prowler-cloud/prowler/pull/9578)
- Update API base URL environment variable to include complete path [(#9542)](https://github.com/prowler-cloud/prowler/pull/9542)
## [0.2.0] (Prowler v5.15.0)
+1 -2
View File
@@ -14,7 +14,6 @@ Full access to Prowler Cloud platform and self-managed Prowler App for:
- **Scan Orchestration**: Trigger on-demand scans and schedule recurring security assessments
- **Resource Inventory**: Search and view detailed information about your audited resources
- **Muting Management**: Create and manage muting rules to suppress non-critical findings
- **Compliance Reporting**: View compliance status across frameworks and drill into requirement-level details
### Prowler Hub
@@ -23,7 +22,7 @@ Access to Prowler's comprehensive security knowledge base:
- **Check Implementation**: View the Python code that powers each security check
- **Automated Fixers**: Access remediation scripts for common security issues
- **Compliance Frameworks**: Explore mappings to **over 70 compliance standards and frameworks**
- **Provider Services**: View available services and checks for all supported Prowler providers
- **Provider Services**: View available services and checks for each cloud provider
### Prowler Documentation
+2 -2
View File
@@ -5,8 +5,8 @@ This package provides MCP tools for accessing:
- Prowler Hub: All security artifacts (detections, remediations and frameworks) supported by Prowler
"""
__version__ = "0.3.0"
__version__ = "0.1.0"
__author__ = "Prowler Team"
__email__ = "engineering@prowler.com"
__all__ = ["__version__", "__author__", "__email__"]
__all__ = ["__version__", "prowler_mcp_server"]
@@ -1,240 +0,0 @@
"""Pydantic models for simplified compliance responses."""
from typing import Any, Literal
from prowler_mcp_server.prowler_app.models.base import MinimalSerializerMixin
from pydantic import (
BaseModel,
ConfigDict,
Field,
SerializerFunctionWrapHandler,
model_serializer,
)
class ComplianceRequirementAttribute(MinimalSerializerMixin, BaseModel):
"""Requirement attributes including associated check IDs.
Used to map requirements to the checks that validate them.
"""
model_config = ConfigDict(frozen=True)
id: str = Field(
description="Requirement identifier within the framework (e.g., '1.1', '2.1.1')"
)
name: str = Field(default="", description="Human-readable name of the requirement")
description: str = Field(
default="", description="Detailed description of the requirement"
)
check_ids: list[str] = Field(
default_factory=list,
description="List of Prowler check IDs that validate this requirement",
)
@classmethod
def from_api_response(cls, data: dict) -> "ComplianceRequirementAttribute":
"""Transform JSON:API compliance requirement attributes response to simplified format."""
attributes = data.get("attributes", {})
# Extract check_ids from the nested attributes structure
nested_attributes = attributes.get("attributes", {})
check_ids = nested_attributes.get("check_ids", [])
return cls(
id=attributes.get("id", data.get("id", "")),
name=attributes.get("name", ""),
description=attributes.get("description", ""),
check_ids=check_ids if check_ids else [],
)
class ComplianceRequirementAttributesListResponse(BaseModel):
"""Response for compliance requirement attributes list with check_ids mappings."""
model_config = ConfigDict(frozen=True)
requirements: list[ComplianceRequirementAttribute] = Field(
description="List of requirements with their associated check IDs"
)
total_count: int = Field(description="Total number of requirements")
@classmethod
def from_api_response(
cls, response: dict
) -> "ComplianceRequirementAttributesListResponse":
"""Transform JSON:API response to simplified format."""
data = response.get("data", [])
requirements = [
ComplianceRequirementAttribute.from_api_response(item) for item in data
]
return cls(
requirements=requirements,
total_count=len(requirements),
)
class ComplianceFrameworkSummary(MinimalSerializerMixin, BaseModel):
"""Simplified compliance framework overview for list operations.
Used by get_compliance_overview() to show high-level compliance status
per framework.
"""
model_config = ConfigDict(frozen=True)
id: str = Field(description="Unique identifier for this compliance overview entry")
compliance_id: str = Field(
description="Compliance framework identifier (e.g., 'cis_1.5_aws', 'pci_dss_v4.0_aws')"
)
framework: str = Field(
description="Human-readable framework name (e.g., 'CIS', 'PCI-DSS', 'HIPAA')"
)
version: str = Field(description="Framework version (e.g., '1.5', '4.0')")
total_requirements: int = Field(
default=0, description="Total number of requirements in this framework"
)
requirements_passed: int = Field(
default=0, description="Number of requirements that passed"
)
requirements_failed: int = Field(
default=0, description="Number of requirements that failed"
)
requirements_manual: int = Field(
default=0, description="Number of requirements requiring manual verification"
)
@property
def pass_percentage(self) -> float:
"""Calculate pass percentage based on passed requirements."""
if self.total_requirements == 0:
return 0.0
return round((self.requirements_passed / self.total_requirements) * 100, 1)
@property
def fail_percentage(self) -> float:
"""Calculate fail percentage based on failed requirements."""
if self.total_requirements == 0:
return 0.0
return round((self.requirements_failed / self.total_requirements) * 100, 1)
@model_serializer(mode="wrap")
def _serialize(self, handler: SerializerFunctionWrapHandler) -> dict[str, Any]:
"""Serialize with calculated percentages included."""
data = handler(self)
# Filter out None/empty values
data = {k: v for k, v in data.items() if v is not None and v != "" and v != []}
# Add calculated percentages
data["pass_percentage"] = self.pass_percentage
data["fail_percentage"] = self.fail_percentage
return data
@classmethod
def from_api_response(cls, data: dict) -> "ComplianceFrameworkSummary":
"""Transform JSON:API compliance overview response to simplified format."""
attributes = data.get("attributes", {})
# The compliance_id field may be in attributes or use the "id" field from attributes
compliance_id = attributes.get("id", data.get("id", ""))
return cls(
id=data["id"],
compliance_id=compliance_id,
framework=attributes.get("framework", ""),
version=attributes.get("version", ""),
total_requirements=attributes.get("total_requirements", 0),
requirements_passed=attributes.get("requirements_passed", 0),
requirements_failed=attributes.get("requirements_failed", 0),
requirements_manual=attributes.get("requirements_manual", 0),
)
class ComplianceRequirement(MinimalSerializerMixin, BaseModel):
"""Individual compliance requirement with its status.
Used by get_compliance_framework_state_details() to show requirement-level breakdown.
"""
model_config = ConfigDict(frozen=True)
id: str = Field(
description="Requirement identifier within the framework (e.g., '1.1', '2.1.1')"
)
description: str = Field(
description="Human-readable description of the requirement"
)
status: Literal["FAIL", "PASS", "MANUAL"] = Field(
description="Requirement status: FAIL (not compliant), PASS (compliant), MANUAL (requires manual verification)"
)
@classmethod
def from_api_response(cls, data: dict) -> "ComplianceRequirement":
"""Transform JSON:API compliance requirement response to simplified format."""
attributes = data.get("attributes", {})
return cls(
id=attributes.get("id", data.get("id", "")),
description=attributes.get("description", ""),
status=attributes.get("status", "MANUAL"),
)
class ComplianceFrameworksListResponse(BaseModel):
"""Response for compliance frameworks list with aggregated statistics."""
model_config = ConfigDict(frozen=True)
frameworks: list[ComplianceFrameworkSummary] = Field(
description="List of compliance frameworks with their status"
)
total_count: int = Field(description="Total number of frameworks returned")
@classmethod
def from_api_response(cls, response: dict) -> "ComplianceFrameworksListResponse":
"""Transform JSON:API response to simplified format."""
data = response.get("data", [])
frameworks = [
ComplianceFrameworkSummary.from_api_response(item) for item in data
]
return cls(
frameworks=frameworks,
total_count=len(frameworks),
)
class ComplianceRequirementsListResponse(BaseModel):
"""Response for compliance requirements list queries."""
model_config = ConfigDict(frozen=True)
requirements: list[ComplianceRequirement] = Field(
description="List of requirements with their status"
)
total_count: int = Field(description="Total number of requirements")
passed_count: int = Field(description="Number of requirements with PASS status")
failed_count: int = Field(description="Number of requirements with FAIL status")
manual_count: int = Field(description="Number of requirements with MANUAL status")
@classmethod
def from_api_response(cls, response: dict) -> "ComplianceRequirementsListResponse":
"""Transform JSON:API response to simplified format."""
data = response.get("data", [])
requirements = [ComplianceRequirement.from_api_response(item) for item in data]
# Calculate counts
passed = sum(1 for r in requirements if r.status == "PASS")
failed = sum(1 for r in requirements if r.status == "FAIL")
manual = sum(1 for r in requirements if r.status == "MANUAL")
return cls(
requirements=requirements,
total_count=len(requirements),
passed_count=passed,
failed_count=failed,
manual_count=manual,
)
@@ -1,409 +0,0 @@
"""Compliance framework tools for Prowler App MCP Server.
This module provides tools for viewing compliance status and requirement details
across all cloud providers.
"""
from typing import Any
from prowler_mcp_server.prowler_app.models.compliance import (
ComplianceFrameworksListResponse,
ComplianceRequirementAttributesListResponse,
ComplianceRequirementsListResponse,
)
from prowler_mcp_server.prowler_app.tools.base import BaseTool
from pydantic import Field
class ComplianceTools(BaseTool):
"""Tools for compliance framework operations.
Provides tools for:
- get_compliance_overview: Get high-level compliance status across all frameworks
- get_compliance_framework_state_details: Get detailed requirement-level breakdown for a specific framework
"""
async def _get_latest_scan_id_for_provider(self, provider_id: str) -> str:
"""Get the latest completed scan_id for a given provider.
Args:
provider_id: Prowler's internal UUID for the provider
Returns:
The scan_id of the latest completed scan for the provider.
Raises:
ValueError: If no completed scans are found for the provider.
"""
scan_params = {
"filter[provider]": provider_id,
"filter[state]": "completed",
"sort": "-inserted_at",
"page[size]": 1,
"page[number]": 1,
}
clean_scan_params = self.api_client.build_filter_params(scan_params)
scans_response = await self.api_client.get("/scans", params=clean_scan_params)
scans_data = scans_response.get("data", [])
if not scans_data:
raise ValueError(
f"No completed scans found for provider {provider_id}. "
"Run a scan first using prowler_app_trigger_scan."
)
scan_id = scans_data[0]["id"]
return scan_id
async def get_compliance_overview(
self,
scan_id: str | None = Field(
default=None,
description="UUID of a specific scan to get compliance data for. Required if provider_id is not specified. Use `prowler_app_list_scans` to find scan IDs.",
),
provider_id: str | None = Field(
default=None,
description="Prowler's internal UUID (v4) for a specific provider. If provided without scan_id, the tool will automatically find the latest completed scan for this provider. Use `prowler_app_search_providers` tool to find provider IDs.",
),
) -> dict[str, Any]:
"""Get high-level compliance overview across all frameworks for a specific scan.
This tool provides a HIGH-LEVEL OVERVIEW of compliance status across all frameworks.
Use this when you need to understand overall compliance posture before drilling into
specific framework details.
You have two options to specify the scan context:
1. Provide a specific scan_id to get compliance data for that scan.
2. Provide a provider_id to get compliance data from the latest completed scan for that provider.
The markdown report includes:
1. Summary Statistics:
- Total number of compliance frameworks evaluated
- Overall compliance metrics across all frameworks
2. Per-Framework Breakdown:
- Framework name, version, and compliance ID
- Requirements passed/failed/manual counts
- Pass percentage for quick assessment
Workflow:
1. Use this tool to get an overview of all compliance frameworks
2. Use prowler_app_get_compliance_framework_state_details with a specific compliance_id to see which requirements failed
"""
if not scan_id and not provider_id:
return {
"error": "Either scan_id or provider_id must be provided. Use prowler_app_search_providers to find provider IDs or prowler_app_list_scans to find scan IDs."
}
elif scan_id and provider_id:
return {
"error": "Provide either scan_id or provider_id, not both. To get compliance data for a specific scan, use scan_id. To get data for the latest scan of a provider, use provider_id."
}
elif not scan_id and provider_id:
try:
scan_id = await self._get_latest_scan_id_for_provider(provider_id)
except ValueError as e:
return {"error": str(e)}
params: dict[str, Any] = {"filter[scan_id]": scan_id}
clean_params = self.api_client.build_filter_params(params)
# Get API response
api_response = await self.api_client.get(
"/compliance-overviews", params=clean_params
)
frameworks_response = ComplianceFrameworksListResponse.from_api_response(
api_response
)
# Build markdown report
frameworks = frameworks_response.frameworks
total_frameworks = frameworks_response.total_count
if total_frameworks == 0:
return {"report": "# Compliance Overview\n\nNo compliance frameworks found"}
# Calculate aggregate statistics
total_requirements = sum(f.total_requirements for f in frameworks)
total_passed = sum(f.requirements_passed for f in frameworks)
total_failed = sum(f.requirements_failed for f in frameworks)
total_manual = sum(f.requirements_manual for f in frameworks)
overall_pass_pct = (
round((total_passed / total_requirements) * 100, 1)
if total_requirements > 0
else 0
)
# Build report
report_lines = [
"# Compliance Overview",
"",
"## Summary Statistics",
f"- **Frameworks Evaluated**: {total_frameworks}",
f"- **Total Requirements**: {total_requirements:,}",
f"- **Passed**: {total_passed:,} ({overall_pass_pct}%)",
f"- **Failed**: {total_failed:,}",
f"- **Manual Review**: {total_manual:,}",
"",
"## Framework Breakdown",
"",
]
# Sort frameworks by fail count (most failures first)
sorted_frameworks = sorted(
frameworks, key=lambda f: f.requirements_failed, reverse=True
)
for fw in sorted_frameworks:
status_indicator = "PASS" if fw.requirements_failed == 0 else "FAIL"
report_lines.append(f"### {fw.framework} {fw.version}")
report_lines.append(f"- **Compliance ID**: `{fw.compliance_id}`")
report_lines.append(f"- **Status**: {status_indicator}")
report_lines.append(
f"- **Requirements**: {fw.requirements_passed}/{fw.total_requirements} passed ({fw.pass_percentage}%)"
)
if fw.requirements_failed > 0:
report_lines.append(f"- **Failed**: {fw.requirements_failed}")
if fw.requirements_manual > 0:
report_lines.append(f"- **Manual Review**: {fw.requirements_manual}")
report_lines.append("")
return {"report": "\n".join(report_lines)}
async def _get_requirement_check_ids_mapping(
self, compliance_id: str
) -> dict[str, list[str]]:
"""Get mapping of requirement IDs to their associated check IDs.
Args:
compliance_id: The compliance framework ID.
Returns:
Dictionary mapping requirement ID to list of check IDs.
"""
params: dict[str, Any] = {
"filter[compliance_id]": compliance_id,
"fields[compliance-requirements-attributes]": "id,attributes",
}
clean_params = self.api_client.build_filter_params(params)
api_response = await self.api_client.get(
"/compliance-overviews/attributes", params=clean_params
)
attributes_response = (
ComplianceRequirementAttributesListResponse.from_api_response(api_response)
)
# Build mapping: requirement_id -> [check_ids]
return {req.id: req.check_ids for req in attributes_response.requirements}
async def _get_failed_finding_ids_for_checks(
self,
check_ids: list[str],
scan_id: str,
) -> list[str]:
"""Get all failed finding IDs for a list of check IDs.
Args:
check_ids: List of Prowler check IDs.
scan_id: The scan ID to filter findings.
Returns:
List of all finding IDs with FAIL status.
"""
if not check_ids:
return []
all_finding_ids: list[str] = []
page_number = 1
page_size = 100
while True:
# Query findings endpoint with check_id filter and FAIL status
params: dict[str, Any] = {
"filter[scan]": scan_id,
"filter[check_id__in]": ",".join(check_ids),
"filter[status]": "FAIL",
"fields[findings]": "uid",
"page[size]": page_size,
"page[number]": page_number,
}
clean_params = self.api_client.build_filter_params(params)
api_response = await self.api_client.get("/findings", params=clean_params)
findings = api_response.get("data", [])
if not findings:
break
all_finding_ids.extend([f["id"] for f in findings])
# Check if we've reached the last page
if len(findings) < page_size:
break
page_number += 1
return all_finding_ids
async def get_compliance_framework_state_details(
self,
compliance_id: str = Field(
description="Compliance framework ID to get details for (e.g., 'cis_1.5_aws', 'pci_dss_v4.0_aws'). You can get compliance IDs from prowler_app_get_compliance_overview or consulting Prowler Hub/Prowler Documentation that you can also find in form of tools in this MCP Server",
),
scan_id: str | None = Field(
default=None,
description="UUID of a specific scan to get compliance data for. Required if provider_id is not specified.",
),
provider_id: str | None = Field(
default=None,
description="Prowler's internal UUID (v4) for a specific provider. If provided without scan_id, the tool will automatically find the latest completed scan for this provider. Use `prowler_app_search_providers` tool to find provider IDs.",
),
) -> dict[str, Any]:
"""Get detailed requirement-level breakdown for a specific compliance framework.
IMPORTANT: This tool returns DETAILED requirement information for a single compliance framework,
focusing on FAILED requirements and their associated FAILED finding IDs.
Use this after prowler_app_get_compliance_overview to drill down into specific frameworks.
The markdown report includes:
1. Framework Summary:
- Compliance ID and scan ID used
- Overall pass/fail/manual counts
2. Failed Requirements Breakdown:
- Each failed requirement's ID and description
- Associated failed finding IDs for each failed requirement
- Use prowler_app_get_finding_details with these finding IDs for more details and remediation guidance
Default behavior:
- Requires either scan_id OR provider_id
- With provider_id (no scan_id): Automatically finds the latest completed scan for that provider
- With scan_id: Uses that specific scan's compliance data
- Only shows failed requirements with their associated failed finding IDs
Workflow:
1. Use prowler_app_get_compliance_overview to identify frameworks with failures
2. Use this tool with the compliance_id to see failed requirements and their finding IDs
3. Use prowler_app_get_finding_details with the finding IDs to get remediation guidance
"""
# Validate that either scan_id or provider_id is provided
if not scan_id and not provider_id:
return {
"error": "Either scan_id or provider_id must be provided. Use prowler_app_search_providers to find provider IDs or prowler_app_list_scans to find scan IDs."
}
# Resolve provider_id to latest scan_id if needed
resolved_scan_id = scan_id
if not scan_id and provider_id:
try:
resolved_scan_id = await self._get_latest_scan_id_for_provider(
provider_id
)
except ValueError as e:
return {"error": str(e)}
# Build params for requirements endpoint
params: dict[str, Any] = {
"filter[scan_id]": resolved_scan_id,
"filter[compliance_id]": compliance_id,
}
params["fields[compliance-requirements-details]"] = "id,description,status"
clean_params = self.api_client.build_filter_params(params)
# Get API response
api_response = await self.api_client.get(
"/compliance-overviews/requirements", params=clean_params
)
requirements_response = ComplianceRequirementsListResponse.from_api_response(
api_response
)
requirements = requirements_response.requirements
if not requirements:
return {
"report": f"# Compliance Framework Details\n\n**Compliance ID**: `{compliance_id}`\n\nNo requirements found for this compliance framework and scan combination."
}
# Get failed requirements
failed_reqs = [r for r in requirements if r.status == "FAIL"]
# Get requirement -> check_ids mapping from attributes endpoint
requirement_check_mapping: dict[str, list[str]] = {}
if failed_reqs:
requirement_check_mapping = await self._get_requirement_check_ids_mapping(
compliance_id
)
# For each failed requirement, get the failed finding IDs
failed_req_findings: dict[str, list[str]] = {}
for req in failed_reqs:
check_ids = requirement_check_mapping.get(req.id, [])
if check_ids:
finding_ids = await self._get_failed_finding_ids_for_checks(
check_ids, resolved_scan_id
)
failed_req_findings[req.id] = finding_ids
# Calculate counts
total_count = len(requirements)
passed_count = sum(1 for r in requirements if r.status == "PASS")
failed_count = len(failed_reqs)
manual_count = sum(1 for r in requirements if r.status == "MANUAL")
# Build markdown report
pass_pct = (
round((passed_count / total_count) * 100, 1) if total_count > 0 else 0
)
report_lines = [
"# Compliance Framework Details",
"",
f"**Compliance ID**: `{compliance_id}`",
f"**Scan ID**: `{resolved_scan_id}`",
"",
"## Summary",
f"- **Total Requirements**: {total_count}",
f"- **Passed**: {passed_count} ({pass_pct}%)",
f"- **Failed**: {failed_count}",
f"- **Manual Review**: {manual_count}",
"",
]
# Show failed requirements with their finding IDs (most actionable)
if failed_reqs:
report_lines.append("## Failed Requirements")
report_lines.append("")
for req in failed_reqs:
report_lines.append(f"### {req.id}")
report_lines.append(f"**Description**: {req.description}")
finding_ids = failed_req_findings.get(req.id, [])
if finding_ids:
report_lines.append(f"**Failed Finding IDs** ({len(finding_ids)}):")
for fid in finding_ids:
report_lines.append(f" - `{fid}`")
else:
report_lines.append("**Failed Finding IDs**: None found")
report_lines.append("")
report_lines.append(
"*Use `prowler_app_get_finding_details` with these finding IDs to get remediation guidance.*"
)
report_lines.append("")
if manual_count > 0:
manual_reqs = [r for r in requirements if r.status == "MANUAL"]
report_lines.append("## Requirements Requiring Manual Review")
report_lines.append("")
for req in manual_reqs:
report_lines.append(f"- **{req.id}**: {req.description}")
report_lines.append("")
return {"report": "\n".join(report_lines)}
@@ -6,13 +6,14 @@ across all providers.
from typing import Any
from pydantic import Field
from prowler_mcp_server.prowler_app.models.resources import (
DetailedResource,
ResourcesListResponse,
ResourcesMetadataResponse,
)
from prowler_mcp_server.prowler_app.tools.base import BaseTool
from pydantic import Field
class ResourcesTools(BaseTool):
@@ -187,7 +188,7 @@ class ResourcesTools(BaseTool):
1. Configuration Details:
- metadata: Provider-specific configuration (tags, policies, encryption settings, network rules)
- partition: Provider-specific partition/region grouping (e.g., aws, aws-cn, aws-eusc, aws-us-gov for AWS)
- partition: Provider-specific partition/region grouping (e.g., aws, aws-cn, aws-us-gov for AWS)
2. Temporal Tracking:
- inserted_at: When Prowler first discovered this resource
@@ -1,3 +1,5 @@
from typing import List, Optional
import httpx
from prowler_mcp_server import __version__
from pydantic import BaseModel, Field
@@ -9,7 +11,7 @@ class SearchResult(BaseModel):
path: str = Field(description="Document path")
title: str = Field(description="Document title")
url: str = Field(description="Documentation URL")
highlights: list[str] = Field(
highlights: List[str] = Field(
description="Highlighted content snippets showing query matches with <mark><b> tags",
default_factory=list,
)
@@ -52,7 +54,7 @@ class ProwlerDocsSearchEngine:
},
)
def search(self, query: str, page_size: int = 5) -> list[SearchResult]:
def search(self, query: str, page_size: int = 5) -> List[SearchResult]:
"""
Search documentation using Mintlify API.
@@ -61,7 +63,7 @@ class ProwlerDocsSearchEngine:
page_size: Maximum number of results to return
Returns:
list of search results
List of search results
"""
try:
# Construct request body
@@ -137,7 +139,7 @@ class ProwlerDocsSearchEngine:
print(f"Search error: {e}")
return []
def get_document(self, doc_path: str) -> str | None:
def get_document(self, doc_path: str) -> Optional[str]:
"""
Get full document content from Mintlify documentation.
@@ -1,8 +1,6 @@
from typing import Any
from typing import Any, List
from fastmcp import FastMCP
from pydantic import Field
from prowler_mcp_server.prowler_documentation.search_engine import (
ProwlerDocsSearchEngine,
)
@@ -14,44 +12,46 @@ prowler_docs_search_engine = ProwlerDocsSearchEngine()
@docs_mcp_server.tool()
def search(
term: str = Field(description="The term to search for in the documentation"),
page_size: int = Field(
5,
description="Number of top results to return to return. It must be between 1 and 20.",
gt=1,
lt=20,
),
) -> list[dict[str, Any]]:
"""Search in Prowler documentation.
query: str,
page_size: int = 5,
) -> List[dict[str, Any]]:
"""
Search in Prowler documentation.
This tool searches through the official Prowler documentation
to find relevant information about everything related to Prowler.
to find relevant information about security checks, cloud providers,
compliance frameworks, and usage instructions.
Uses fulltext search to find the most relevant documentation pages
based on your query.
Args:
query: The search query
page_size: Number of top results to return (default: 5)
Returns:
List of search results with highlights showing matched terms (in <mark><b> tags)
"""
# The return hint cannot use the SearchResult type because the JSON API MCP Generator cannot handle Pydantic models yet.
return prowler_docs_search_engine.search(term, page_size)  # type: ignore
return prowler_docs_search_engine.search(query, page_size)
@docs_mcp_server.tool()
def get_document(
doc_path: str = Field(
description="Path to the documentation file to retrieve. It is the same as the 'path' field of the search results. Use `prowler_docs_search` to find the path first."
),
) -> dict[str, str]:
"""Retrieve the full content of a Prowler documentation file.
doc_path: str,
) -> str:
"""
Retrieve the full content of a Prowler documentation file.
Use this after searching to get the complete content of a specific
documentation file.
Args:
doc_path: Path to the documentation file. It is the same as the "path" field of the search results.
Returns:
Full content of the documentation file in markdown format.
Full content of the documentation file
"""
content: str | None = prowler_docs_search_engine.get_document(doc_path)
content = prowler_docs_search_engine.get_document(doc_path)
if content is None:
return {"error": f"Document '{doc_path}' not found."}
else:
return {"content": content}
raise ValueError(f"Document not found: {doc_path}")
return content
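A minimal sketch of the documented search-then-fetch flow, driving the engine directly (ProwlerDocsSearchEngine and SearchResult are the classes shown in this diff; the query string is illustrative):
engine = ProwlerDocsSearchEngine()
results = engine.search("s3 bucket public access", page_size=3)
if results:
    # SearchResult.path feeds straight into get_document, as the docstrings describe.
    content = engine.get_document(results[0].path)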
@@ -4,10 +4,10 @@ Prowler Hub MCP module
Provides access to Prowler Hub API for security checks and compliance frameworks.
"""
from typing import Any, Optional
import httpx
from fastmcp import FastMCP
from pydantic import Field
from prowler_mcp_server import __version__
# Initialize FastMCP for Prowler Hub
@@ -55,90 +55,109 @@ def github_check_path(provider_id: str, check_id: str, suffix: str) -> str:
return f"{GITHUB_RAW_BASE}/{provider_id}/services/{service_id}/{check_id}/{check_id}{suffix}"
# Security Check Tools
@hub_mcp_server.tool()
async def list_checks(
providers: list[str] = Field(
default=[],
description="Filter by Prowler provider IDs. Example: ['aws', 'azure']. Use `prowler_hub_list_providers` to get available provider IDs.",
),
services: list[str] = Field(
default=[],
description="Filter by provider services. Example: ['s3', 'ec2', 'keyvault']. Use `prowler_hub_get_provider_services` to get available services for a provider.",
),
severities: list[str] = Field(
default=[],
description="Filter by severity levels. Example: ['high', 'critical']. Available: 'low', 'medium', 'high', 'critical'.",
),
categories: list[str] = Field(
default=[],
description="Filter by security categories. Example: ['encryption', 'internet-exposed'].",
),
compliances: list[str] = Field(
default=[],
description="Filter by compliance framework IDs. Example: ['cis_4.0_aws', 'ens_rd2022_azure']. Use `prowler_hub_list_compliances` to get available compliance IDs.",
),
) -> dict:
"""List security Prowler Checks with filtering capabilities.
IMPORTANT: This tool returns LIGHTWEIGHT check data. Use this for fast browsing and filtering.
For complete details including risk, remediation guidance, and categories use `prowler_hub_get_check_details`.
IMPORTANT: An unfiltered request returns 1000+ checks. Use filters to narrow results.
async def get_check_filters() -> dict[str, Any]:
"""
Get the available filter values for the `get_checks` tool. It is recommended to call this before `get_checks` to learn which filter values are valid.
Returns:
Available filter options including providers, types, services, severities,
categories, and compliance frameworks with their respective counts
"""
try:
response = prowler_hub_client.get("/check/filters")
response.raise_for_status()
filters = response.json()
return {"filters": filters}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
}
except Exception as e:
return {"error": str(e)}
# Security Check Tools
@hub_mcp_server.tool()
async def get_checks(
providers: Optional[str] = None,
types: Optional[str] = None,
services: Optional[str] = None,
severities: Optional[str] = None,
categories: Optional[str] = None,
compliances: Optional[str] = None,
ids: Optional[str] = None,
fields: Optional[str] = "id,service,severity,title,description,risk",
) -> dict[str, Any]:
"""
List Prowler security checks. The list can be filtered by the parameters defined for the tool.
It is recommended to use the tool `get_check_filters` to get the available values for the filters.
An unfiltered request will return more than 1000 checks, so it is recommended to use the filters.
Args:
providers: Filter by Prowler provider IDs. Example: "aws,azure". Use the tool `list_providers` to get the available providers IDs.
types: Filter by check types.
services: Filter by provider services IDs. Example: "s3,keyvault". Use the tool `list_providers` to get the available services IDs in a provider.
severities: Filter by severity levels. Example: "medium,high". Available values are "low", "medium", "high", "critical".
categories: Filter by categories. Example: "cluster-security,encryption".
compliances: Filter by compliance framework IDs. Example: "cis_4.0_aws,ens_rd2022_azure".
ids: Filter by specific check IDs. Example: "s3_bucket_level_public_access_block".
fields: Specify which fields from checks metadata to return (id is always included). Example: "id,title,description,risk".
Available values are "id", "title", "description", "provider", "type", "service", "subservice", "severity", "risk", "reference", "remediation", "services_required", "aws_arn_template", "notes", "categories", "default_value", "resource_type", "related_url", "depends_on", "related_to", "fixer".
The default parameters are "id,title,description".
If null, all fields will be returned.
Returns:
List of security checks matching the filters. The structure is as follows:
{
"count": N,
"checks": [
{
"id": "check_id",
"provider": "provider_id",
"title": "Human-readable check title",
"severity": "critical|high|medium|low",
},
{"id": "check_id_1", "title": "check_title_1", "description": "check_description_1", ...},
{"id": "check_id_2", "title": "check_title_2", "description": "check_description_2", ...},
{"id": "check_id_3", "title": "check_title_3", "description": "check_description_3", ...},
...
]
}
Useful Example Workflow:
1. Use `prowler_hub_list_providers` to see available Prowler providers
2. Use `prowler_hub_get_provider_services` to see services for a provider
3. Use this tool with filters to find relevant checks
4. Use `prowler_hub_get_check_details` to get complete information for a specific check
"""
# Lightweight fields for listing
lightweight_fields = "id,title,severity,provider"
params: dict[str, str] = {"fields": lightweight_fields}
params: dict[str, str] = {}
if providers:
params["providers"] = ",".join(providers)
params["providers"] = providers
if types:
params["types"] = types
if services:
params["services"] = ",".join(services)
params["services"] = services
if severities:
params["severities"] = ",".join(severities)
params["severities"] = severities
if categories:
params["categories"] = ",".join(categories)
params["categories"] = categories
if compliances:
params["compliances"] = ",".join(compliances)
params["compliances"] = compliances
if ids:
params["ids"] = ids
if fields:
params["fields"] = fields
try:
response = prowler_hub_client.get("/check", params=params)
response.raise_for_status()
checks = response.json()
# Return checks as a lightweight list
checks_list = []
checks_dict = {}
for check in checks:
check_data = {
"id": check["id"],
"provider": check["provider"],
"title": check["title"],
"severity": check["severity"],
}
checks_list.append(check_data)
check_data = {}
# Always include the id field as it's mandatory for the response structure
if "id" in check:
check_data["id"] = check["id"]
return {"count": len(checks), "checks": checks_list}
# Include other requested fields
for field in fields.split(","):
if field != "id" and field in check: # Skip id since it's already added
check_data[field] = check[field]
checks_dict[check["id"]] = check_data
return {"count": len(checks), "checks": checks_dict}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
@@ -148,220 +167,60 @@ async def list_checks(
@hub_mcp_server.tool()
async def semantic_search_checks(
term: str = Field(
description="Search term. Examples: 'public access', 'encryption', 'MFA', 'logging'.",
),
) -> dict:
"""Search for security checks using free-text search across all metadata.
async def get_check_raw_metadata(
provider_id: str,
check_id: str,
) -> dict[str, Any]:
"""
Fetch the raw check metadata JSON; this is a low-level version of the `get_checks` tool.
It is recommended to use the `get_checks` tool filtered by the `ids` parameter instead of this tool.
IMPORTANT: This tool returns LIGHTWEIGHT check data. Use this for discovering checks by topic.
For complete details including risk, remediation guidance, and categories use `prowler_hub_get_check_details`.
Searches across check titles, descriptions, risk statements, remediation guidance,
and other text fields. Use this when you don't know the exact check ID or want to
explore checks related to a topic.
Args:
provider_id: Prowler provider ID (e.g., "aws", "azure").
check_id: Prowler check ID (folder and base filename).
Returns:
{
"count": N,
"checks": [
{
"id": "check_id",
"provider": "provider_id",
"title": "Human-readable check title",
"severity": "critical|high|medium|low",
},
...
]
}
Useful Example Workflow:
1. Use this tool to search for checks by keyword or topic
2. Use `prowler_hub_list_checks` with filters for more targeted browsing
3. Use `prowler_hub_get_check_details` to get complete information for a specific check
Raw metadata JSON as stored in Prowler.
"""
try:
response = prowler_hub_client.get("/check/search", params={"term": term})
response.raise_for_status()
checks = response.json()
# Return checks as a lightweight list
checks_list = []
for check in checks:
check_data = {
"id": check["id"],
"provider": check["provider"],
"title": check["title"],
"severity": check["severity"],
if provider_id and check_id:
url = github_check_path(provider_id, check_id, ".metadata.json")
try:
resp = github_raw_client.get(url)
resp.raise_for_status()
return resp.json()
except httpx.HTTPStatusError as e:
if e.response.status_code == 404:
return {
"error": f"Check {check_id} not found in Prowler",
}
else:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
}
except Exception as e:
return {
"error": f"Error fetching check {check_id} from Prowler: {str(e)}",
}
checks_list.append(check_data)
return {"count": len(checks), "checks": checks_list}
except httpx.HTTPStatusError as e:
else:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
"error": "Provider ID and check ID are required",
}
except Exception as e:
return {"error": str(e)}
@hub_mcp_server.tool()
async def get_check_details(
check_id: str = Field(
description="The check ID to retrieve details for. Example: 's3_bucket_level_public_access_block'"
),
) -> dict:
"""Retrieve comprehensive details about a specific security check by its ID.
IMPORTANT: This tool returns COMPLETE check details.
Use this after finding a specific check ID; you can get one via `prowler_hub_list_checks` or `prowler_hub_semantic_search_checks`.
Returns:
{
"id": "string",
"title": "string",
"description": "string",
"provider": "string",
"service": "string",
"severity": "low",
"risk": "string",
"reference": [
"string"
],
"additional_urls": [
"string"
],
"remediation": {
"cli": {
"description": "string"
},
"terraform": {
"description": "string"
},
"nativeiac": {
"description": "string"
},
"other": {
"description": "string"
},
"wui": {
"description": "string",
"reference": "string"
}
},
"services_required": [
"string"
],
"notes": "string",
"compliances": [
{
"name": "string",
"id": "string"
}
],
"categories": [
"string"
],
"resource_type": "string",
"related_url": "string",
"fixer": bool
}
Useful Example Workflow:
1. Use `prowler_hub_list_checks` or `prowler_hub_semantic_search_checks` to find check IDs
2. Use this tool with the check 'id' to get complete information including remediation guidance
"""
try:
response = prowler_hub_client.get(f"/check/{check_id}")
response.raise_for_status()
check = response.json()
if not check:
return {"error": f"Check '{check_id}' not found"}
# Build response with only non-empty fields to save tokens
result = {}
# Core fields
result["id"] = check["id"]
if check.get("title"):
result["title"] = check["title"]
if check.get("description"):
result["description"] = check["description"]
if check.get("provider"):
result["provider"] = check["provider"]
if check.get("service"):
result["service"] = check["service"]
if check.get("severity"):
result["severity"] = check["severity"]
if check.get("risk"):
result["risk"] = check["risk"]
if check.get("resource_type"):
result["resource_type"] = check["resource_type"]
# List fields
if check.get("reference"):
result["reference"] = check["reference"]
if check.get("additional_urls"):
result["additional_urls"] = check["additional_urls"]
if check.get("services_required"):
result["services_required"] = check["services_required"]
if check.get("categories"):
result["categories"] = check["categories"]
if check.get("compliances"):
result["compliances"] = check["compliances"]
# Other fields
if check.get("notes"):
result["notes"] = check["notes"]
if check.get("related_url"):
result["related_url"] = check["related_url"]
if check.get("fixer") is not None:
result["fixer"] = check["fixer"]
# Remediation - filter out empty nested values
remediation = check.get("remediation", {})
if remediation:
filtered_remediation = {}
for key, value in remediation.items():
if value and isinstance(value, dict):
# Filter out empty values within nested dict
filtered_value = {k: v for k, v in value.items() if v}
if filtered_value:
filtered_remediation[key] = filtered_value
elif value:
filtered_remediation[key] = value
if filtered_remediation:
result["remediation"] = filtered_remediation
return result
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
}
except Exception as e:
return {"error": str(e)}
@hub_mcp_server.tool()
async def get_check_code(
provider_id: str = Field(
description="Prowler Provider ID. Example: 'aws', 'azure', 'gcp', 'kubernetes'. Use `prowler_hub_list_providers` to get available provider IDs.",
),
check_id: str = Field(
description="The check ID. Example: 's3_bucket_public_access'. Get IDs from `prowler_hub_list_checks` or `prowler_hub_search_checks`.",
),
) -> dict:
"""Fetch the Python implementation code of a Prowler security check.
provider_id: str,
check_id: str,
) -> dict[str, Any]:
"""
Fetch the check implementation Python code from Prowler.
The check code shows exactly how Prowler evaluates resources for security issues.
Use this to understand check logic, customize checks, or create new ones.
Args:
provider_id: Prowler provider ID (e.g., "aws", "azure").
check_id: Prowler check ID (e.g., "opensearch_service_domains_not_publicly_accessible").
Returns:
{
"content": "Python source code of the check implementation"
}
Dict with the code content as text.
"""
if provider_id and check_id:
url = github_check_path(provider_id, check_id, ".py")
@@ -392,29 +251,18 @@ async def get_check_code(
@hub_mcp_server.tool()
async def get_check_fixer(
provider_id: str = Field(
description="Prowler Provider ID. Example: 'aws', 'azure', 'gcp', 'kubernetes'. Use `prowler_hub_list_providers` to get available provider IDs.",
),
check_id: str = Field(
description="The check ID. Example: 's3_bucket_public_access'. Get IDs from `prowler_hub_list_checks` or `prowler_hub_search_checks`.",
),
) -> dict:
"""Fetch the auto-remediation (fixer) code for a Prowler security check.
provider_id: str,
check_id: str,
) -> dict[str, Any]:
"""
Fetch the check fixer Python code from Prowler, if it exists.
IMPORTANT: Not all checks have fixers. A "fixer not found" response means the check
doesn't have auto-remediation code - this is normal for many checks.
Fixer code provides automated remediation that can fix security issues detected by checks.
Use this to understand how to programmatically remediate findings.
Args:
provider_id: Prowler provider ID (e.g., "aws", "azure").
check_id: Prowler check ID (e.g., "opensearch_service_domains_not_publicly_accessible").
Returns:
{
"content": "Python source code of the auto-remediation implementation"
}
Or if no fixer exists:
{
"error": "Fixer not found for check {check_id}"
}
Dict with the fixer content as text if it exists, or an indication that no fixer exists.
"""
if provider_id and check_id:
url = github_check_path(provider_id, check_id, "_fixer.py")
@@ -447,66 +295,95 @@ async def get_check_fixer(
}
# Compliance Framework Tools
@hub_mcp_server.tool()
async def list_compliances(
provider: list[str] = Field(
default=[],
description="Filter by cloud provider. Example: ['aws']. Use `prowler_hub_list_providers` to get available provider IDs.",
),
) -> dict:
"""List compliance frameworks supported by Prowler.
async def search_checks(term: str) -> dict[str, Any]:
"""
Search the term across all text properties of check metadata.
IMPORTANT: This tool returns LIGHTWEIGHT compliance data. Use this for fast browsing and filtering.
For complete details including requirements use `prowler_hub_get_compliance_details`.
Compliance frameworks define sets of security requirements that checks map to.
Use this to discover available frameworks for compliance reporting.
WARNING: An unfiltered request may return a large number of frameworks. Use the provider filter with no more than 3 different providers at a time to keep the response easy to handle.
Args:
term: Search term to find in check titles, descriptions, and other text fields
Returns:
List of checks matching the search term
"""
try:
response = prowler_hub_client.get("/check/search", params={"term": term})
response.raise_for_status()
checks = response.json()
return {
"count": len(checks),
"checks": checks,
}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
}
except Exception as e:
return {"error": str(e)}
# Compliance Framework Tools
@hub_mcp_server.tool()
async def get_compliance_frameworks(
provider: Optional[str] = None,
fields: Optional[
str
] = "id,framework,provider,description,total_checks,total_requirements",
) -> dict[str, Any]:
"""
List and filter compliance frameworks. The list can be filtered by the parameters defined for the tool.
Args:
provider: Filter by one Prowler provider ID. Example: "aws". Use the tool `list_providers` to get the available providers IDs.
fields: Specify which fields to return (id is always included). Example: "id,provider,description,version".
It is recommended to run with the default parameters because the full response is too large.
Available values are "id", "framework", "provider", "description", "total_checks", "total_requirements", "created_at", "updated_at".
The default parameters are "id,framework,provider,description,total_checks,total_requirements".
If null, all fields will be returned.
Returns:
List of compliance frameworks. The structure is as follows:
{
"count": N,
"compliances": [
{
"id": "cis_4.0_aws",
"name": "CIS Amazon Web Services Foundations Benchmark v4.0",
"provider": "aws",
},
...
]
"frameworks": {
"framework_id": {
"id": "framework_id",
"provider": "provider_id",
"description": "framework_description",
"version": "framework_version"
}
}
}
Useful Example Workflow:
1. Use `prowler_hub_list_providers` to see available cloud providers
2. Use this tool to browse compliance frameworks
3. Use `prowler_hub_get_compliance_details` with the compliance 'id' to get complete information
"""
# Lightweight fields for listing
lightweight_fields = "id,name,provider"
params: dict[str, str] = {"fields": lightweight_fields}
params = {}
if provider:
params["provider"] = ",".join(provider)
params["provider"] = provider
if fields:
params["fields"] = fields
try:
response = prowler_hub_client.get("/compliance", params=params)
response.raise_for_status()
compliances = response.json()
frameworks = response.json()
# Return compliances as a lightweight list
compliances_list = []
for compliance in compliances:
compliance_data = {
"id": compliance["id"],
"name": compliance["name"],
"provider": compliance["provider"],
}
compliances_list.append(compliance_data)
frameworks_dict = {}
for framework in frameworks:
framework_data = {}
# Always include the id field as it's mandatory for the response structure
if "id" in framework:
framework_data["id"] = framework["id"]
return {"count": len(compliances), "compliances": compliances_list}
# Include other requested fields
for field in fields.split(","):
if (
field != "id" and field in framework
): # Skip id since it's already added
framework_data[field] = framework[field]
frameworks_dict[framework["id"]] = framework_data
return {"count": len(frameworks), "frameworks": frameworks_dict}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
@@ -516,140 +393,27 @@ async def list_compliances(
@hub_mcp_server.tool()
async def semantic_search_compliances(
term: str = Field(
description="Search term. Examples: 'CIS', 'HIPAA', 'PCI', 'GDPR', 'SOC2', 'NIST'.",
),
) -> dict:
"""Search for compliance frameworks using free-text search.
async def search_compliance_frameworks(term: str) -> dict[str, Any]:
"""
Search compliance frameworks by term.
IMPORTANT: This tool returns LIGHTWEIGHT compliance data. Use this for discovering frameworks by topic.
For complete details including requirements use `prowler_hub_get_compliance_details`.
Searches across framework names, descriptions, and metadata. Use this when you
want to find frameworks related to a specific regulation, standard, or topic.
Args:
term: Search term to find in framework names and descriptions
Returns:
{
"count": N,
"compliances": [
{
"id": "cis_4.0_aws",
"name": "CIS Amazon Web Services Foundations Benchmark v4.0",
"provider": "aws",
},
...
]
}
List of compliance frameworks matching the search term
"""
try:
response = prowler_hub_client.get("/compliance/search", params={"term": term})
response.raise_for_status()
compliances = response.json()
frameworks = response.json()
# Return compliances as a lightweight list
compliances_list = []
for compliance in compliances:
compliance_data = {
"id": compliance["id"],
"name": compliance["name"],
"provider": compliance["provider"],
}
compliances_list.append(compliance_data)
return {"count": len(compliances), "compliances": compliances_list}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
"count": len(frameworks),
"search_term": term,
"frameworks": frameworks,
}
except Exception as e:
return {"error": str(e)}
@hub_mcp_server.tool()
async def get_compliance_details(
compliance_id: str = Field(
description="The compliance framework ID to retrieve details for. Example: 'cis_4.0_aws'. Use `prowler_hub_list_compliances` or `prowler_hub_semantic_search_compliances` to find available compliance IDs.",
),
) -> dict:
"""Retrieve comprehensive details about a specific compliance framework by its ID.
IMPORTANT: This tool returns COMPLETE compliance details.
Use this after finding a specific compliance via `prowler_hub_list_compliances` or `prowler_hub_semantic_search_compliances`.
Returns:
{
"id": "string",
"name": "string",
"framework": "string",
"provider": "string",
"version": "string",
"description": "string",
"total_checks": int,
"total_requirements": int,
"requirements": [
{
"id": "string",
"name": "string",
"description": "string",
"checks": ["check_id_1", "check_id_2"]
}
]
}
"""
try:
response = prowler_hub_client.get(f"/compliance/{compliance_id}")
response.raise_for_status()
compliance = response.json()
if not compliance:
return {"error": f"Compliance '{compliance_id}' not found"}
# Build response with only non-empty fields to save tokens
result = {}
# Core fields
result["id"] = compliance["id"]
if compliance.get("name"):
result["name"] = compliance["name"]
if compliance.get("framework"):
result["framework"] = compliance["framework"]
if compliance.get("provider"):
result["provider"] = compliance["provider"]
if compliance.get("version"):
result["version"] = compliance["version"]
if compliance.get("description"):
result["description"] = compliance["description"]
# Numeric fields
if compliance.get("total_checks"):
result["total_checks"] = compliance["total_checks"]
if compliance.get("total_requirements"):
result["total_requirements"] = compliance["total_requirements"]
# Requirements - filter out empty nested values
requirements = compliance.get("requirements", [])
if requirements:
filtered_requirements = []
for req in requirements:
filtered_req = {}
if req.get("id"):
filtered_req["id"] = req["id"]
if req.get("name"):
filtered_req["name"] = req["name"]
if req.get("description"):
filtered_req["description"] = req["description"]
if req.get("checks"):
filtered_req["checks"] = req["checks"]
if filtered_req:
filtered_requirements.append(filtered_req)
if filtered_requirements:
result["requirements"] = filtered_requirements
return result
except httpx.HTTPStatusError as e:
if e.response.status_code == 404:
return {"error": f"Compliance '{compliance_id}' not found"}
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
}
@@ -659,28 +423,20 @@ async def get_compliance_details(
# Provider Tools
@hub_mcp_server.tool()
async def list_providers() -> dict:
"""List all providers supported by Prowler.
This is a reference tool that shows available providers (aws, azure, gcp, kubernetes, etc.)
that can be scanned for security issues.
Use the provider IDs from this tool as filter values in other tools.
async def list_providers() -> dict[str, Any]:
"""
Get all available Prowler providers and their associated services.
Returns:
List of Prowler providers with their associated services. The structure is as follows:
{
"count": N,
"providers": [
{
"id": "aws",
"name": "Amazon Web Services"
},
{
"id": "azure",
"name": "Microsoft Azure"
},
...
]
"providers": {
"provider_id": {
"name": "provider_name",
"services": ["service_id_1", "service_id_2", "service_id_3", ...]
}
}
}
"""
try:
@@ -688,16 +444,14 @@ async def list_providers() -> dict:
response.raise_for_status()
providers = response.json()
providers_list = []
providers_dict = {}
for provider in providers:
providers_list.append(
{
"id": provider["id"],
"name": provider.get("name", ""),
}
)
providers_dict[provider["id"]] = {
"name": provider.get("name", ""),
"services": provider.get("services", []),
}
return {"count": len(providers), "providers": providers_list}
return {"count": len(providers), "providers": providers_dict}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
@@ -706,42 +460,24 @@ async def list_providers() -> dict:
return {"error": str(e)}
# Analytics Tools
@hub_mcp_server.tool()
async def get_provider_services(
provider_id: str = Field(
description="The provider ID to get services for. Example: 'aws', 'azure', 'gcp', 'kubernetes'. Use `prowler_hub_list_providers` to get available provider IDs.",
),
) -> dict:
"""Get the list of services IDs available for a specific cloud provider.
Services represent the different resources and capabilities that Prowler can scan
within a provider (e.g., s3, ec2, iam for AWS or keyvault, storage for Azure).
Use service IDs from this tool as filter values in other tools.
async def get_artifacts_count() -> dict[str, Any]:
"""
Get total count of security artifacts (checks + compliance frameworks).
Returns:
{
"provider_id": "aws",
"provider_name": "Amazon Web Services",
"count": N,
"services": ["s3", "ec2", "iam", "rds", "lambda", ...]
}
Total number of artifacts in the Prowler Hub.
"""
try:
response = prowler_hub_client.get("/providers")
response = prowler_hub_client.get("/n_artifacts")
response.raise_for_status()
providers = response.json()
data = response.json()
for provider in providers:
if provider["id"] == provider_id:
return {
"provider_id": provider["id"],
"provider_name": provider.get("name", ""),
"count": len(provider.get("services", [])),
"services": provider.get("services", []),
}
return {"error": f"Provider '{provider_id}' not found"}
return {
"total_artifacts": data.get("n", 0),
"details": "Total count includes both security checks and compliance frameworks",
}
except httpx.HTTPStatusError as e:
return {
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
+2 -1
@@ -11,9 +11,10 @@ description = "MCP server for Prowler ecosystem"
name = "prowler-mcp"
readme = "README.md"
requires-python = ">=3.12"
version = "0.3.0"
version = "0.1.0"
[project.scripts]
generate-prowler-app-mcp-server = "prowler_mcp_server.prowler_app.utils.server_generator:generate_server_file"
prowler-mcp = "prowler_mcp_server.main:main"
[tool.uv]
+1 -1
@@ -603,7 +603,7 @@ wheels = [
[[package]]
name = "prowler-mcp"
version = "0.3.0"
version = "0.1.0"
source = { editable = "." }
dependencies = [
{ name = "fastmcp" },
+4 -41
@@ -2,36 +2,11 @@
All notable changes to the **Prowler SDK** are documented in this file.
## [5.17.0] (Prowler UNRELEASED)
## [5.16.0] (Prowler UNRELEASED)
### Added
- Add Prowler ThreatScore for the Alibaba Cloud provider [(#9511)](https://github.com/prowler-cloud/prowler/pull/9511)
- `compute_instance_group_multiple_zones` check for GCP provider [(#9566)](https://github.com/prowler-cloud/prowler/pull/9566)
- Support AWS European Sovereign Cloud [(#9649)](https://github.com/prowler-cloud/prowler/pull/9649)
- `compute_instance_disk_auto_delete_disabled` check for GCP provider [(#9604)](https://github.com/prowler-cloud/prowler/pull/9604)
- Bedrock service pagination [(#9606)](https://github.com/prowler-cloud/prowler/pull/9606)
### Changed
- Update AWS Step Functions service metadata to new format [(#9432)](https://github.com/prowler-cloud/prowler/pull/9432)
- Update AWS Route 53 service metadata to new format [(#9406)](https://github.com/prowler-cloud/prowler/pull/9406)
- Update AWS SQS service metadata to new format [(#9429)](https://github.com/prowler-cloud/prowler/pull/9429)
- Update AWS Shield service metadata to new format [(#9427)](https://github.com/prowler-cloud/prowler/pull/9427)
- Update AWS Secrets Manager service metadata to new format [(#9408)](https://github.com/prowler-cloud/prowler/pull/9408)
- Improve SageMaker service tag retrieval with parallel execution [(#9609)](https://github.com/prowler-cloud/prowler/pull/9609)
---
## [5.16.1] (Prowler v5.16.1)
### Fixed
- ZeroDivision error from Prowler ThreatScore [(#9653)](https://github.com/prowler-cloud/prowler/pull/9653)
---
## [5.16.0] (Prowler v5.16.0)
### Added
- `privilege-escalation` and `ec2-imdsv1` categories for AWS checks [(#9537)](https://github.com/prowler-cloud/prowler/pull/9537)
- `privilege-escalation` and `ec2-imdsv1` categories for AWS checks [(#9536)](https://github.com/prowler-cloud/prowler/pull/9536)
- Supported IaC formats and scanner documentation for the IaC provider [(#9553)](https://github.com/prowler-cloud/prowler/pull/9553)
### Changed
@@ -39,25 +14,14 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Update AWS Kafka service metadata to new format [(#9261)](https://github.com/prowler-cloud/prowler/pull/9261)
- Update AWS KMS service metadata to new format [(#9263)](https://github.com/prowler-cloud/prowler/pull/9263)
- Update AWS MemoryDB service metadata to new format [(#9266)](https://github.com/prowler-cloud/prowler/pull/9266)
- Update AWS Inspector v2 service metadata to new format [(#9260)](https://github.com/prowler-cloud/prowler/pull/9260)
- Update AWS Service Catalog service metadata to new format [(#9410)](https://github.com/prowler-cloud/prowler/pull/9410)
- Update AWS SNS service metadata to new format [(#9428)](https://github.com/prowler-cloud/prowler/pull/9428)
- Update AWS Trusted Advisor service metadata to new format [(#9435)](https://github.com/prowler-cloud/prowler/pull/9435)
- Update AWS WAF service metadata to new format [(#9480)](https://github.com/prowler-cloud/prowler/pull/9480)
- Update AWS WAF v2 service metadata to new format [(#9481)](https://github.com/prowler-cloud/prowler/pull/9481)
### Fixed
- Fix typo `trustboundaries` category to `trust-boundaries` [(#9536)](https://github.com/prowler-cloud/prowler/pull/9536)
- Fix incorrect `bedrock-agent` regional availability, now using official AWS docs instead of copying from `bedrock`
- Store MongoDB Atlas provider regions as lowercase [(#9554)](https://github.com/prowler-cloud/prowler/pull/9554)
- Store GCP Cloud Storage bucket regions as lowercase [(#9567)](https://github.com/prowler-cloud/prowler/pull/9567)
---
## [5.15.1] (Prowler v5.15.1)
## [5.15.1] (Prowler UNRELEASED)
### Fixed
- Fix false negative in AWS `apigateway_restapi_logging_enabled` check by refining stage logging evaluation to ensure logging level is not set to "OFF" [(#9304)](https://github.com/prowler-cloud/prowler/pull/9304)
- Fix typo `trustboundaries` category to `trust-boundaries` [(#9536)](https://github.com/prowler-cloud/prowler/pull/9536)
---
@@ -70,7 +34,6 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `compute_instance_preemptible_vm_disabled` check for GCP provider [(#9342)](https://github.com/prowler-cloud/prowler/pull/9342)
- `compute_instance_automatic_restart_enabled` check for GCP provider [(#9271)](https://github.com/prowler-cloud/prowler/pull/9271)
- `compute_instance_deletion_protection_enabled` check for GCP provider [(#9358)](https://github.com/prowler-cloud/prowler/pull/9358)
- Add needed changes to AlibabaCloud provider from the API [(#9485)](https://github.com/prowler-cloud/prowler/pull/9485)
- Update SOC2 - Azure with Processing Integrity requirements [(#9463)](https://github.com/prowler-cloud/prowler/pull/9463)
- Update SOC2 - GCP with Processing Integrity requirements [(#9464)](https://github.com/prowler-cloud/prowler/pull/9464)
- Update SOC2 - AWS with Processing Integrity requirements [(#9462)](https://github.com/prowler-cloud/prowler/pull/9462)
-15
@@ -83,9 +83,6 @@ from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_azure import (
AzureMitreAttack,
)
from prowler.lib.outputs.compliance.mitre_attack.mitre_attack_gcp import GCPMitreAttack
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_alibaba import (
ProwlerThreatScoreAlibaba,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_aws import (
ProwlerThreatScoreAWS,
)
@@ -1042,18 +1039,6 @@ def prowler():
)
generated_outputs["compliance"].append(cis)
cis.batch_write_data_to_file()
elif compliance_name == "prowler_threatscore_alibabacloud":
filename = (
f"{output_options.output_directory}/compliance/"
f"{output_options.output_filename}_{compliance_name}.csv"
)
prowler_threatscore = ProwlerThreatScoreAlibaba(
findings=finding_outputs,
compliance=bulk_compliance_frameworks[compliance_name],
file_path=filename,
)
generated_outputs["compliance"].append(prowler_threatscore)
prowler_threatscore.batch_write_data_to_file()
else:
filename = (
f"{output_options.output_directory}/compliance/"
File diff suppressed because it is too large
+1 -1
@@ -38,7 +38,7 @@ class _MutableTimestamp:
timestamp = _MutableTimestamp(datetime.today())
timestamp_utc = _MutableTimestamp(datetime.now(timezone.utc))
prowler_version = "5.17.0"
prowler_version = "5.16.0"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://raw.githubusercontent.com/prowler-cloud/prowler/dc7d2d5aeb92fdf12e8604f42ef6472cd3e8e889/docs/img/prowler-logo-black.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
-3
@@ -507,9 +507,6 @@ gcp:
# GCP Compute Configuration
# gcp.compute_public_address_shodan
shodan_api_key: null
# gcp.compute_instance_group_multiple_zones
# Minimum number of zones a MIG should span for high availability
mig_min_zones: 2
# GCP Service Account and user-managed keys unused configuration
# gcp.iam_service_account_unused
# gcp.iam_sa_user_managed_key_unused
@@ -146,29 +146,3 @@ class ProwlerThreatScoreKubernetesModel(BaseModel):
Muted: bool
Framework: str
Name: str
class ProwlerThreatScoreAlibabaModel(BaseModel):
"""
ProwlerThreatScoreAlibabaModel generates a finding's output in Alibaba Cloud Prowler ThreatScore Compliance format.
"""
Provider: str
Description: str
AccountId: str
Region: str
AssessmentDate: str
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Title: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str] = None
Requirements_Attributes_AttributeDescription: str
Requirements_Attributes_AdditionalInformation: str
Requirements_Attributes_LevelOfRisk: int
Requirements_Attributes_Weight: int
Status: str
StatusExtended: str
ResourceId: str
ResourceName: str
CheckId: str
@@ -103,16 +103,8 @@ def get_prowler_threatscore_table(
for pillar in pillars:
pillar_table["Provider"].append(compliance.Provider)
pillar_table["Pillar"].append(pillar)
if max_score_per_pillar[pillar] == 0:
pillar_score = 100.0
score_color = Fore.GREEN
else:
pillar_score = (
score_per_pillar[pillar] / max_score_per_pillar[pillar]
) * 100
score_color = Fore.RED
pillar_table["Score"].append(
f"{Style.BRIGHT}{score_color}{pillar_score:.2f}%{Style.RESET_ALL}"
f"{Style.BRIGHT}{Fore.RED}{(score_per_pillar[pillar] / max_score_per_pillar[pillar]) * 100:.2f}%{Style.RESET_ALL}"
)
if pillars[pillar]["FAIL"] > 0:
pillar_table["Status"].append(
@@ -156,12 +148,9 @@ def get_prowler_threatscore_table(
print(
f"\nFramework {Fore.YELLOW}{compliance_framework.upper()}{Style.RESET_ALL} Results:"
)
# Handle division by zero when all findings are muted
if max_generic_score == 0:
generic_threat_score = 100.0
else:
generic_threat_score = generic_score / max_generic_score * 100
print(f"\nGeneric Threat Score: {generic_threat_score:.2f}%")
print(
f"\nGeneric Threat Score: {generic_score / max_generic_score * 100:.2f}%"
)
print(
tabulate(
pillar_table,
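The guard above condensed into one expression; when every finding is muted the maximum score is 0 and the score is reported as 100% (a sketch, not code from the diff):
def threat_score_pct(score: float, max_score: float) -> float:
    # Avoid ZeroDivisionError when all findings are muted.
    return 100.0 if max_score == 0 else score / max_score * 100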
@@ -1,98 +0,0 @@
from prowler.config.config import timestamp
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
from prowler.lib.outputs.compliance.prowler_threatscore.models import (
ProwlerThreatScoreAlibabaModel,
)
from prowler.lib.outputs.finding import Finding
class ProwlerThreatScoreAlibaba(ComplianceOutput):
"""
This class represents the Alibaba Cloud Prowler ThreatScore compliance output.
Attributes:
- _data (list): A list to store transformed data from findings.
- _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.
Methods:
- transform: Transforms findings into Alibaba Cloud Prowler ThreatScore compliance format.
"""
def transform(
self,
findings: list[Finding],
compliance: Compliance,
compliance_name: str,
) -> None:
"""
Transforms a list of findings into Alibaba Cloud Prowler ThreatScore compliance format.
Parameters:
- findings (list): A list of findings.
- compliance (Compliance): A compliance model.
- compliance_name (str): The name of the compliance model.
Returns:
- None
"""
for finding in findings:
# Get the compliance requirements for the finding
finding_requirements = finding.compliance.get(compliance_name, [])
for requirement in compliance.Requirements:
if requirement.Id in finding_requirements:
for attribute in requirement.Attributes:
compliance_row = ProwlerThreatScoreAlibabaModel(
Provider=finding.provider,
Description=compliance.Description,
AccountId=finding.account_uid,
Region=finding.region,
AssessmentDate=str(timestamp),
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Title=attribute.Title,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
Requirements_Attributes_Weight=attribute.Weight,
Status=finding.status,
StatusExtended=finding.status_extended,
ResourceId=finding.resource_uid,
ResourceName=finding.resource_name,
CheckId=finding.check_id,
Muted=finding.muted,
Framework=compliance.Framework,
Name=compliance.Name,
)
self._data.append(compliance_row)
# Add manual requirements to the compliance output
for requirement in compliance.Requirements:
if not requirement.Checks:
for attribute in requirement.Attributes:
compliance_row = ProwlerThreatScoreAlibabaModel(
Provider=compliance.Provider.lower(),
Description=compliance.Description,
AccountId="",
Region="",
AssessmentDate=str(timestamp),
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Title=attribute.Title,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_AttributeDescription=attribute.AttributeDescription,
Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation,
Requirements_Attributes_LevelOfRisk=attribute.LevelOfRisk,
Requirements_Attributes_Weight=attribute.Weight,
Status="MANUAL",
StatusExtended="Manual check",
ResourceId="manual_check",
ResourceName="Manual check",
CheckId="manual",
Muted=False,
Framework=compliance.Framework,
Name=compliance.Name,
)
self._data.append(compliance_row)
@@ -75,9 +75,6 @@ class AlibabacloudProvider(Provider):
mutelist_path: str = None,
mutelist_content: dict = None,
fixer_config: dict = {},
access_key_id: str = None,
access_key_secret: str = None,
security_token: str = None,
):
"""
Initialize the AlibabaCloudProvider.
@@ -94,9 +91,6 @@ class AlibabacloudProvider(Provider):
mutelist_path: Path to the mutelist file
mutelist_content: Content of the mutelist file
fixer_config: Fixer configuration dictionary
access_key_id: Alibaba Cloud Access Key ID
access_key_secret: Alibaba Cloud Access Key Secret
security_token: STS Security Token (for temporary credentials)
Raises:
AlibabaCloudSetUpSessionError: If an error occurs during the setup process.
@@ -113,7 +107,6 @@ class AlibabacloudProvider(Provider):
- alibabacloud = AlibabacloudProvider(regions=["cn-hangzhou", "cn-shanghai"]) # Specific regions
- alibabacloud = AlibabacloudProvider(role_arn="acs:ram::...:role/ProwlerRole")
- alibabacloud = AlibabacloudProvider(ecs_ram_role="ECS-Prowler-Role")
- alibabacloud = AlibabacloudProvider(access_key_id="LTAI...", access_key_secret="...")
"""
logger.info("Initializing Alibaba Cloud Provider ...")
@@ -125,9 +118,6 @@ class AlibabacloudProvider(Provider):
ecs_ram_role=ecs_ram_role,
oidc_role_arn=oidc_role_arn,
credentials_uri=credentials_uri,
access_key_id=access_key_id,
access_key_secret=access_key_secret,
security_token=security_token,
)
logger.info("Alibaba Cloud session configured successfully")
@@ -244,9 +234,6 @@ class AlibabacloudProvider(Provider):
ecs_ram_role: str = None,
oidc_role_arn: str = None,
credentials_uri: str = None,
access_key_id: str = None,
access_key_secret: str = None,
security_token: str = None,
) -> AlibabaCloudSession:
"""
Set up the Alibaba Cloud session.
@@ -257,9 +244,6 @@ class AlibabacloudProvider(Provider):
ecs_ram_role: Name of the RAM role attached to an ECS instance
oidc_role_arn: ARN of the RAM role for OIDC authentication
credentials_uri: URI to retrieve credentials from an external service
access_key_id: Alibaba Cloud Access Key ID
access_key_secret: Alibaba Cloud Access Key Secret
security_token: STS Security Token (for temporary credentials)
Returns:
AlibabaCloudSession object
@@ -291,22 +275,25 @@ class AlibabacloudProvider(Provider):
if not ecs_ram_role and "ALIBABA_CLOUD_ECS_METADATA" in os.environ:
ecs_ram_role = os.environ["ALIBABA_CLOUD_ECS_METADATA"]
# Check for access key credentials from parameters first, then fall back to environment variables
# Check for access key credentials from environment variables only
# Support both ALIBABA_CLOUD_* and ALIYUN_* prefixes for compatibility
if not access_key_id:
if "ALIBABA_CLOUD_ACCESS_KEY_ID" in os.environ:
access_key_id = os.environ["ALIBABA_CLOUD_ACCESS_KEY_ID"]
elif "ALIYUN_ACCESS_KEY_ID" in os.environ:
access_key_id = os.environ["ALIYUN_ACCESS_KEY_ID"]
# Note: We intentionally do NOT support credentials via CLI arguments for security reasons
access_key_id = None
access_key_secret = None
security_token = None
if not access_key_secret:
if "ALIBABA_CLOUD_ACCESS_KEY_SECRET" in os.environ:
access_key_secret = os.environ["ALIBABA_CLOUD_ACCESS_KEY_SECRET"]
elif "ALIYUN_ACCESS_KEY_SECRET" in os.environ:
access_key_secret = os.environ["ALIYUN_ACCESS_KEY_SECRET"]
if "ALIBABA_CLOUD_ACCESS_KEY_ID" in os.environ:
access_key_id = os.environ["ALIBABA_CLOUD_ACCESS_KEY_ID"]
elif "ALIYUN_ACCESS_KEY_ID" in os.environ:
access_key_id = os.environ["ALIYUN_ACCESS_KEY_ID"]
if "ALIBABA_CLOUD_ACCESS_KEY_SECRET" in os.environ:
access_key_secret = os.environ["ALIBABA_CLOUD_ACCESS_KEY_SECRET"]
elif "ALIYUN_ACCESS_KEY_SECRET" in os.environ:
access_key_secret = os.environ["ALIYUN_ACCESS_KEY_SECRET"]
# Check for STS security token (for temporary credentials)
if not security_token and "ALIBABA_CLOUD_SECURITY_TOKEN" in os.environ:
if "ALIBABA_CLOUD_SECURITY_TOKEN" in os.environ:
security_token = os.environ["ALIBABA_CLOUD_SECURITY_TOKEN"]
# Check for RAM role assumption from CLI arguments or environment
@@ -708,9 +695,6 @@ class AlibabacloudProvider(Provider):
@staticmethod
def test_connection(
access_key_id: str = None,
access_key_secret: str = None,
security_token: str = None,
role_arn: str = None,
role_session_name: str = None,
ecs_ram_role: str = None,
@@ -723,9 +707,6 @@ class AlibabacloudProvider(Provider):
Test the connection to Alibaba Cloud with the provided credentials.
Args:
access_key_id: Alibaba Cloud Access Key ID (for static credentials)
access_key_secret: Alibaba Cloud Access Key Secret (for static credentials)
security_token: STS Security Token (for temporary credentials)
role_arn: ARN of the RAM role to assume
role_session_name: Session name when assuming the RAM role
ecs_ram_role: Name of the RAM role attached to an ECS instance
@@ -753,24 +734,17 @@ class AlibabacloudProvider(Provider):
raise_on_exception=False
)
Connection(is_connected=True, Error=None)
>>> AlibabacloudProvider.test_connection(
access_key_id="LTAI...",
access_key_secret="...",
raise_on_exception=False
)
Connection(is_connected=True, Error=None)
"""
try:
# Setup session - pass credentials directly instead of using env vars
session = None
# Setup session
session = AlibabacloudProvider.setup_session(
role_arn=role_arn,
role_session_name=role_session_name,
ecs_ram_role=ecs_ram_role,
oidc_role_arn=oidc_role_arn,
credentials_uri=credentials_uri,
access_key_id=access_key_id,
access_key_secret=access_key_secret,
security_token=security_token,
)
# Validate credentials
@@ -781,6 +755,10 @@ class AlibabacloudProvider(Provider):
# Validate provider_id if provided
if provider_id and caller_identity.account_id != provider_id:
from prowler.providers.alibabacloud.exceptions.exceptions import (
AlibabaCloudInvalidCredentialsError,
)
raise AlibabaCloudInvalidCredentialsError(
file=pathlib.Path(__file__).name,
message=f"Provider ID mismatch: expected '{provider_id}', got '{caller_identity.account_id}'",
+5 -8
@@ -984,8 +984,6 @@ class AwsProvider(Provider):
global_region = "us-east-1"
if self._identity.partition == "aws-cn":
global_region = "cn-north-1"
elif self._identity.partition == "aws-eusc":
global_region = "eusc-de-east-1"
elif self._identity.partition == "aws-us-gov":
global_region = "us-gov-east-1"
elif "aws-iso" in self._identity.partition:
@@ -1475,12 +1473,11 @@ class AwsProvider(Provider):
sts_client = create_sts_session(session, 'us-west-2')
"""
try:
if aws_region.startswith("cn-"):
sts_endpoint_url = f"https://sts.{aws_region}.amazonaws.com.cn"
elif aws_region.startswith("eusc-"):
sts_endpoint_url = f"https://sts.{aws_region}.amazonaws.eu"
else:
sts_endpoint_url = f"https://sts.{aws_region}.amazonaws.com"
sts_endpoint_url = (
f"https://sts.{aws_region}.amazonaws.com"
if not aws_region.startswith("cn-")
else f"https://sts.{aws_region}.amazonaws.com.cn"
)
return session.client("sts", aws_region, endpoint_url=sts_endpoint_url)
except Exception as error:
logger.critical(
File diff suppressed because it is too large
+1 -1
@@ -59,5 +59,5 @@ def parse_iam_credentials_arn(arn: str) -> ARN:
def is_valid_arn(arn: str) -> bool:
"""is_valid_arn returns True or False whether the given AWS ARN (Amazon Resource Name) is valid or not."""
regex = r"^arn:aws(-cn|-eusc|-us-gov|-iso|-iso-b)?:[a-zA-Z0-9\-]+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:[a-zA-Z0-9\-_\/:\.\*]+(:\d+)?$"
regex = r"^arn:aws(-cn|-us-gov|-iso|-iso-b)?:[a-zA-Z0-9\-]+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:[a-zA-Z0-9\-_\/:\.\*]+(:\d+)?$"
return re.match(regex, arn) is not None
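A quick sketch of what the partition change affects, reusing the two regexes from this hunk (the ARN is illustrative):
import re

master_regex = r"^arn:aws(-cn|-eusc|-us-gov|-iso|-iso-b)?:[a-zA-Z0-9\-]+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:[a-zA-Z0-9\-_\/:\.\*]+(:\d+)?$"
release_regex = r"^arn:aws(-cn|-us-gov|-iso|-iso-b)?:[a-zA-Z0-9\-]+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:[a-zA-Z0-9\-_\/:\.\*]+(:\d+)?$"
arn = "arn:aws-eusc:iam::123456789012:role/ProwlerRole"
print(bool(re.match(master_regex, arn)))   # True: aws-eusc is a valid partition
print(bool(re.match(release_regex, arn)))  # False: the release regex drops -eusc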
@@ -55,7 +55,7 @@ class SecurityHubConnection(Connection):
Attributes:
enabled_regions (set): Set of regions where Security Hub is enabled.
disabled_regions (set): Set of regions where Security Hub is disabled.
partition (str): AWS partition (e.g., aws, aws-cn, aws-eusc, aws-us-gov) where SecurityHub is deployed.
partition (str): AWS partition (e.g., aws, aws-cn, aws-us-gov) where SecurityHub is deployed.
"""
enabled_regions: set = None
@@ -70,7 +70,7 @@ class SecurityHub:
Attributes:
_session (Session): AWS session object for authentication and communication with AWS services.
_aws_account_id (str): AWS account ID associated with the SecurityHub instance.
_aws_partition (str): AWS partition (e.g., aws, aws-cn, aws-eusc, aws-us-gov) where SecurityHub is deployed.
_aws_partition (str): AWS partition (e.g., aws, aws-cn, aws-us-gov) where SecurityHub is deployed.
_findings_per_region (dict): Dictionary containing findings per region.
_enabled_regions (dict): Dictionary containing enabled regions with SecurityHub clients.
@@ -115,7 +115,7 @@ class SecurityHub:
Args:
- aws_session (Session): AWS session object for authentication and communication with AWS services.
- aws_account_id (str): AWS account ID associated with the SecurityHub instance.
- aws_partition (str): AWS partition (e.g., aws, aws-cn, aws-eusc, aws-us-gov) where SecurityHub is deployed.
- aws_partition (str): AWS partition (e.g., aws, aws-cn, aws-us-gov) where SecurityHub is deployed.
- findings (list[AWSSecurityFindingFormat]): List of findings to filter and send to Security Hub.
- aws_security_hub_available_regions (list[str]): List of regions where Security Hub is available.
- send_only_fails (bool): Flag indicating whether to send only findings with status 'FAIL'.
@@ -477,7 +477,7 @@ class SecurityHub:
Args:
aws_account_id (str): AWS account ID to check for Prowler integration.
aws_partition (str): AWS partition (e.g., aws, aws-cn, aws-eusc, aws-us-gov).
aws_partition (str): AWS partition (e.g., aws, aws-cn, aws-us-gov).
regions (set): Set of regions to check for Security Hub integration.
raise_on_exception (bool): Whether to raise an exception if an error occurs.
profile (str): AWS profile name to use for authentication.
-2
@@ -90,7 +90,6 @@ class Partition(str, Enum):
Attributes:
aws (str): Represents the standard AWS commercial regions.
aws_cn (str): Represents the AWS China regions.
aws_eusc (str): Represents the AWS European Sovereign Cloud regions.
aws_us_gov (str): Represents the AWS GovCloud (US) Regions.
aws_iso (str): Represents the AWS ISO (US) Regions.
aws_iso_b (str): Represents the AWS ISOB (US) Regions.
@@ -100,7 +99,6 @@ class Partition(str, Enum):
aws = "aws"
aws_cn = "aws-cn"
aws_eusc = "aws-eusc"
aws_us_gov = "aws-us-gov"
aws_iso = "aws-iso"
aws_iso_b = "aws-iso-b"
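Because `Partition` is a `str`-valued `Enum`, dropping the `aws_eusc` member also changes lookup-by-value: `Partition("aws-eusc")` now raises. A minimal sketch of the post-change enum, abbreviated to the members shown above:

```python
from enum import Enum

class Partition(str, Enum):
    aws = "aws"
    aws_cn = "aws-cn"
    aws_us_gov = "aws-us-gov"
    aws_iso = "aws-iso"
    aws_iso_b = "aws-iso-b"

print(Partition("aws-cn"))   # Partition.aws_cn
try:
    Partition("aws-eusc")    # ValueError after this change
except ValueError as err:
    print(err)               # 'aws-eusc' is not a valid Partition
```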
@@ -55,18 +55,16 @@ class Bedrock(AWSService):
def _list_guardrails(self, regional_client):
logger.info("Bedrock - Listing Guardrails...")
try:
paginator = regional_client.get_paginator("list_guardrails")
for page in paginator.paginate():
for guardrail in page.get("guardrails", []):
if not self.audit_resources or (
is_resource_filtered(guardrail["arn"], self.audit_resources)
):
self.guardrails[guardrail["arn"]] = Guardrail(
id=guardrail["id"],
name=guardrail["name"],
arn=guardrail["arn"],
region=regional_client.region,
)
for guardrail in regional_client.list_guardrails().get("guardrails", []):
if not self.audit_resources or (
is_resource_filtered(guardrail["arn"], self.audit_resources)
):
self.guardrails[guardrail["arn"]] = Guardrail(
id=guardrail["id"],
name=guardrail["name"],
arn=guardrail["arn"],
region=regional_client.region,
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -132,22 +130,20 @@ class BedrockAgent(AWSService):
def _list_agents(self, regional_client):
logger.info("Bedrock Agent - Listing Agents...")
try:
paginator = regional_client.get_paginator("list_agents")
for page in paginator.paginate():
for agent in page.get("agentSummaries", []):
agent_arn = f"arn:aws:bedrock:{regional_client.region}:{self.audited_account}:agent/{agent['agentId']}"
if not self.audit_resources or (
is_resource_filtered(agent_arn, self.audit_resources)
):
self.agents[agent_arn] = Agent(
id=agent["agentId"],
name=agent["agentName"],
arn=agent_arn,
guardrail_id=agent.get("guardrailConfiguration", {}).get(
"guardrailIdentifier"
),
region=regional_client.region,
)
for agent in regional_client.list_agents().get("agentSummaries", []):
agent_arn = f"arn:aws:bedrock:{regional_client.region}:{self.audited_account}:agent/{agent['agentId']}"
if not self.audit_resources or (
is_resource_filtered(agent_arn, self.audit_resources)
):
self.agents[agent_arn] = Agent(
id=agent["agentId"],
name=agent["agentName"],
arn=agent_arn,
guardrail_id=agent.get("guardrailConfiguration", {}).get(
"guardrailIdentifier"
),
region=regional_client.region,
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -1,39 +1,29 @@
{
"Provider": "aws",
"CheckID": "inspector2_active_findings_exist",
"CheckTitle": "Inspector2 is enabled with no active findings",
"CheckTitle": "Check if Inspector2 active findings exist",
"CheckAliases": [
"inspector2_findings_exist"
],
"CheckType": [
"Software and Configuration Checks/Vulnerabilities/CVE",
"Software and Configuration Checks/Patch Management",
"Software and Configuration Checks/AWS Security Best Practices",
"Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"CheckType": [],
"ServiceName": "inspector2",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceIdTemplate": "arn:aws:inspector2:region:account-id/detector-id",
"Severity": "medium",
"ResourceType": "Other",
"Description": "**Amazon Inspector2** active findings are assessed across eligible resources when the service is `ENABLED`.\n\nIndicates whether any findings remain in the **Active** state versus none.",
"Risk": "**Unremediated Inspector2 findings** mean known vulnerabilities or exposures persist on workloads.\n\nThis enables:\n- Unauthorized access and data exfiltration (C)\n- Code tampering and privilege escalation (I)\n- Service disruption via exploitation or malware (A)",
"RelatedUrl": "",
"AdditionalURLs": [
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Inspector/amazon-inspector-findings.html",
"https://docs.aws.amazon.com/inspector/latest/user/findings-understanding.html",
"https://docs.aws.amazon.com/inspector/latest/user/what-is-inspector.html"
],
"Description": "This check determines if there are any active findings in your AWS account that have been detected by AWS Inspector2. Inspector2 is an automated security assessment service that helps improve the security and compliance of applications deployed on AWS.",
"Risk": "Without using AWS Inspector, you may not be aware of all the security vulnerabilities in your AWS resources, which could lead to unauthorized access, data breaches, or other security incidents.",
"RelatedUrl": "https://docs.aws.amazon.com/inspector/latest/user/findings-understanding.html",
"Remediation": {
"Code": {
"CLI": "aws inspector2 create-filter --name <example_resource_name> --action SUPPRESS --filter-criteria '{\"findingStatus\":[{\"comparison\":\"EQUALS\",\"value\":\"ACTIVE\"}]}'",
"NativeIaC": "```yaml\n# CloudFormation: Suppress all ACTIVE Inspector findings\nResources:\n <example_resource_name>:\n Type: AWS::InspectorV2::Filter\n Properties:\n Name: <example_resource_name>\n Action: SUPPRESS # critical: converts matching findings to Suppressed, not Active\n FilterCriteria:\n FindingStatus:\n - Comparison: EQUALS\n Value: ACTIVE # critical: targets all active findings\n```",
"Other": "1. In the AWS Console, go to Amazon Inspector\n2. Open Suppression rules (or Filters) and click Create suppression rule\n3. Set condition: Finding status = Active\n4. Set action to Suppress and click Create\n5. Verify the Active findings count is 0 on the dashboard",
"Terraform": "```hcl\n# Terraform: Suppress all ACTIVE Inspector findings\nresource \"aws_inspector2_filter\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n action = \"SUPPRESS\" # critical: converts matching findings to Suppressed, not Active\n\n filter_criteria {\n finding_status {\n comparison = \"EQUALS\"\n value = \"ACTIVE\" # critical: targets all active findings\n }\n }\n}\n```"
"CLI": "",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Inspector/amazon-inspector-findings.html",
"Terraform": ""
},
"Recommendation": {
"Text": "Prioritize and remediate **Active findings** quickly: patch hosts and runtimes, update/rebuild images, fix vulnerable code, and close unintended exposure.\n\nApply **least privilege**, use **defense in depth**, and avoid broad suppressions. Integrate findings into CI/CD and vulnerability management for continuous prevention.",
"Url": "https://hub.prowler.com/check/inspector2_active_findings_exist"
"Text": "Review the active findings from Inspector2",
"Url": "https://docs.aws.amazon.com/inspector/latest/user/what-is-inspector.html"
}
},
"Categories": [],
@@ -1,37 +1,31 @@
{
"Provider": "aws",
"CheckID": "inspector2_is_enabled",
"CheckTitle": "Inspector2 is enabled for Amazon EC2 instances, ECR container images, Lambda functions, and Lambda code",
"CheckTitle": "Check if Inspector2 is enabled for Amazon EC2 instances, ECR container images and Lambda functions.",
"CheckAliases": [
"inspector2_findings_exist"
],
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices"
],
"ServiceName": "inspector2",
"SubServiceName": "",
"ResourceIdTemplate": "",
"ResourceIdTemplate": "arn:aws:inspector2:region:account-id/detector-id",
"Severity": "medium",
"ResourceType": "Other",
"Description": "**Amazon Inspector 2** activation and coverage across regions, verifying that scanning is active for **EC2**, **ECR**, **Lambda functions**, and **Lambda code** where applicable.\n\nIt flags missing account activation or gaps in any scan type.",
"Risk": "Absent or partial coverage leaves **unpatched vulnerabilities**, risky **code dependencies**, and **unintended network exposure** undetected.\n\nAttackers can exploit known CVEs for **remote code execution**, **lateral movement**, and **data exfiltration**, degrading **confidentiality**, **integrity**, and **availability**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Inspector2/enable-amazon-inspector2.html",
"https://docs.aws.amazon.com/inspector/latest/user/findings-understanding.html",
"https://docs.aws.amazon.com/inspector/latest/user/getting_started_tutorial.html"
],
"ResourceType": "AwsAccount",
"Description": "Ensure that the new version of Amazon Inspector is enabled in order to help you improve the security and compliance of your AWS cloud environment. Amazon Inspector 2 is a vulnerability management solution that continually scans scans your Amazon EC2 instances, ECR container images, and Lambda functions to identify software vulnerabilities and instances of unintended network exposure.",
"Risk": "Without using AWS Inspector, you may not be aware of all the security vulnerabilities in your AWS resources, which could lead to unauthorized access, data breaches, or other security incidents.",
"RelatedUrl": "https://docs.aws.amazon.com/inspector/latest/user/findings-understanding.html",
"Remediation": {
"Code": {
"CLI": "aws inspector2 enable --resource-types EC2 ECR LAMBDA LAMBDA_CODE",
"CLI": "aws inspector2 enable --resource-types 'EC2' 'ECR' 'LAMBDA' 'LAMBDA_CODE'",
"NativeIaC": "",
"Other": "1. Sign in to the AWS Console and open Amazon Inspector (v2)\n2. If not yet activated: click Get started > Activate Amazon Inspector\n3. If already activated: go to Settings > Scans and ensure EC2, ECR, Lambda functions, and Lambda code are all enabled, then Save",
"Terraform": "```hcl\nresource \"aws_inspector2_enabler\" \"<example_resource_name>\" {\n resource_types = [\"EC2\", \"ECR\", \"LAMBDA\", \"LAMBDA_CODE\"] # Enables Inspector2 scans for all required resource types\n}\n```"
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Inspector2/enable-amazon-inspector2.html",
"Terraform": ""
},
"Recommendation": {
"Text": "Enable **Amazon Inspector 2** across all regions and activate scans for **EC2**, **ECR**, **Lambda**, and **Lambda code**.\n\nApply **defense in depth**: auto-enable coverage for new workloads, integrate findings with patching and CI/CD gates, enforce remediation SLAs, and grant only **least privilege** to process and act on findings.",
"Url": "https://hub.prowler.com/check/inspector2_is_enabled"
"Text": "Enable Amazon Inspector 2 for your AWS account.",
"Url": "https://docs.aws.amazon.com/inspector/latest/user/getting_started_tutorial.html"
}
},
"Categories": [],

Some files were not shown because too many files have changed in this diff