mirror of
https://github.com/prowler-cloud/prowler.git
synced 2026-05-09 00:47:04 +00:00
Compare commits
69 Commits
| SHA1 |
|---|
| 9e4e50a738 |
| 34727a7237 |
| 4216a3e23a |
| a59192e6f5 |
| 592bc6f6a8 |
| 962ebac8e4 |
| 2c5d47a8cd |
| bcaa6ac488 |
| 71683f3093 |
| 2357af912d |
| 7971b40f49 |
| e585ae45bd |
| 4d9921a9b7 |
| 19b602c381 |
| 4c3e741af7 |
| 8affbf44ce |
| 16798e293d |
| 1194d34396 |
| 98277689f5 |
| 0ddd7fbd69 |
| 22b233f206 |
| aa759ab6b7 |
| 369d6cecc1 |
| d23c2f3b53 |
| 786059bfb2 |
| 703a33108c |
| 7c6d658154 |
| 21d7d08b4b |
| f314725f4d |
| 02f43a7ad6 |
| 0dd8981ee4 |
| 269e51259d |
| f4afdf0541 |
| 652cb69216 |
| 921f49a0de |
| 6cb770fcc8 |
| 86449fb99d |
| 40dd0e640b |
| 8db3a89669 |
| c802dc8a36 |
| 3ab9a4efa5 |
| 36b8aa1b79 |
| e821e07d7d |
| 228fe6d579 |
| 578186aa40 |
| 4608e45c8a |
| 5987651aee |
| 85800f2ddd |
| 4fb5272362 |
| 85d38b5f71 |
| 59dcdb87c4 |
| 9297453b8a |
| dd37f4ee1f |
| 20f36f7c84 |
| ec4d27746f |
| 7076900fb1 |
| 5d90352a0f |
| a981dc64a7 |
| d2086cad3f |
| 380b89cfb6 |
| 13b04d339b |
| be3c5fb3c1 |
| 1de01bcb78 |
| 13d983450c |
| 8b368e1343 |
| c76a9baa20 |
| 30e2813e02 |
| 0f874c6ffd |
| 2242689295 |
@@ -145,7 +145,7 @@ SENTRY_RELEASE=local
NEXT_PUBLIC_SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}

#### Prowler release version ####
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.25.0
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.26.0

# Social login credentials
SOCIAL_GOOGLE_OAUTH_CALLBACK_URL="${AUTH_URL}/api/auth/callback/google"
@@ -0,0 +1,15 @@
# These are supported funding model platforms

github: [prowler-cloud]
# patreon: # Replace with a single Patreon username
# open_collective: # Replace with a single Open Collective username
# ko_fi: # Replace with a single Ko-fi username
# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
# liberapay: # Replace with a single Liberapay username
# issuehunt: # Replace with a single IssueHunt username
# lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
# polar: # Replace with a single Polar username
# buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
# thanks_dev: # Replace with a single thanks.dev username
# custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
@@ -0,0 +1,143 @@
name: "🔎 New Check Request"
description: Request a new Prowler security check
title: "[New Check]: "
labels: ["feature-request", "status/needs-triage"]

body:
  - type: checkboxes
    id: search
    attributes:
      label: Existing check search
      description: Confirm this check does not already exist before opening a new request.
      options:
        - label: I have searched existing issues, Prowler Hub, and the public roadmap, and this check does not already exist.
          required: true

  - type: markdown
    attributes:
      value: |
        Use this form to describe the security condition that Prowler should evaluate.

        The most useful inputs for [Prowler Studio](https://github.com/prowler-cloud/prowler-studio) are:
        - What should be detected
        - What PASS and FAIL mean
        - Vendor docs, API references, SDK methods, CLI commands, or reference code

  - type: dropdown
    id: provider
    attributes:
      label: Provider
      description: Cloud or platform this check targets.
      options:
        - AWS
        - Azure
        - GCP
        - Kubernetes
        - GitHub
        - Microsoft 365
        - OCI
        - Alibaba Cloud
        - Cloudflare
        - MongoDB Atlas
        - Google Workspace
        - OpenStack
        - Vercel
        - NHN
        - Other / New provider
    validations:
      required: true

  - type: input
    id: other_provider_name
    attributes:
      label: New provider name
      description: Only fill this if you selected "Other / New provider" above.
      placeholder: "NewProviderName"
    validations:
      required: false

  - type: input
    id: service_name
    attributes:
      label: Service or product area
      description: Optional. Main service, product, or feature to audit.
      placeholder: "s3, bedrock, entra, repository, apiserver"
    validations:
      required: false

  - type: input
    id: suggested_check_name
    attributes:
      label: Suggested check name
      description: Optional. Use `snake_case` following `<service>_<resource>_<best_practice>`, with lowercase letters and underscores only.
      placeholder: "bedrock_guardrail_sensitive_information_filter_enabled"
    validations:
      required: false

  - type: textarea
    id: context
    attributes:
      label: Context and goal
      description: Describe the security problem, why it matters, and what this new check should help detect.
      placeholder: |-
        - Security condition to validate:
        - Why it matters:
        - Resource, feature, or configuration involved:
    validations:
      required: true

  - type: textarea
    id: expected_behavior
    attributes:
      label: Expected behavior
      description: Explain what the check should evaluate and what PASS, FAIL, or MANUAL should mean.
      placeholder: |-
        - Resource or scope to evaluate:
        - PASS when:
        - FAIL when:
        - MANUAL when (if applicable):
        - Exclusions, thresholds, or edge cases:
    validations:
      required: true

  - type: textarea
    id: references
    attributes:
      label: References
      description: Add vendor docs, API references, SDK methods, CLI commands, endpoint docs, sample payloads, or similar reference material.
      placeholder: |-
        - Product or service documentation:
        - API or SDK reference:
        - CLI command or endpoint documentation:
        - Sample payload or response:
        - Security advisory or benchmark:
    validations:
      required: true

  - type: dropdown
    id: severity
    attributes:
      label: Suggested severity
      description: Your best estimate. Reviewers will confirm during triage.
      options:
        - Critical
        - High
        - Medium
        - Low
        - Informational
        - Not sure
    validations:
      required: true

  - type: textarea
    id: implementation_notes
    attributes:
      label: Additional implementation notes
      description: Optional. Add permissions, unsupported regions, config knobs, product limitations, or anything else that may affect implementation.
      placeholder: |-
        - Required permissions or scopes:
        - Region, tenant, or subscription limitations:
        - Configurable behavior or thresholds:
        - Other constraints:
    validations:
      required: false
@@ -158,7 +158,7 @@ jobs:
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=${{ github.event_name == 'pull_request' && 'min' || 'max' }},scope=${{ matrix.arch }}

  # Create and push multi-architecture manifest
  create-manifest:
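A note on the new `cache-to` expression above, since it recurs throughout these workflows: GitHub Actions expressions have no ternary operator, so `cond && 'min' || 'max'` emulates one through short-circuit evaluation (safe here because `'min'` is truthy). A minimal shell sketch of the same idiom, illustration only:

```bash
event_name="pull_request"
# Short-circuit ternary: `test && x || y` yields x when the test passes,
# y otherwise, mirroring the GitHub-expression idiom in the hunk above.
mode=$([ "$event_name" = "pull_request" ] && echo min || echo max)
echo "$mode"   # prints "min"
```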
@@ -5,10 +5,16 @@ on:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-container-checks.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-container-checks.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -57,16 +63,7 @@ jobs:

  api-container-build-and-scan:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
@@ -119,23 +116,22 @@ jobs:
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0

      - name: Build container for ${{ matrix.arch }}
      - name: Build container
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
        with:
          context: ${{ env.API_WORKING_DIR }}
          push: false
          load: true
          platforms: ${{ matrix.platform }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=${{ github.event_name == 'pull_request' && 'min' || 'max' }}

      - name: Scan container with Trivy for ${{ matrix.arch }}
      - name: Scan container with Trivy
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}-${{ matrix.arch }}
          image-tag: ${{ github.sha }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
@@ -5,10 +5,20 @@ on:
    branches:
      - "master"
      - "v5.*"
    paths:
      - 'api/**'
      - '.github/workflows/api-tests.yml'
      - '.github/workflows/api-security.yml'
      - '.github/actions/setup-python-poetry/**'
  pull_request:
    branches:
      - "master"
      - "v5.*"
    paths:
      - 'api/**'
      - '.github/workflows/api-tests.yml'
      - '.github/workflows/api-security.yml'
      - '.github/actions/setup-python-poetry/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -4,8 +4,6 @@ on:
  pull_request:
    branches:
      - 'master'
      - 'v3'
      - 'v4.*'
      - 'v5.*'
    types:
      - 'opened'
@@ -43,14 +43,11 @@ jobs:

          echo "Processing release tag: $RELEASE_TAG"

          # Remove 'v' prefix if present (e.g., v3.2.0 -> 3.2.0)
          VERSION_ONLY="${RELEASE_TAG#v}"

          # Check if it's a minor version (X.Y.0)
          if [[ "$VERSION_ONLY" =~ ^([0-9]+)\.([0-9]+)\.0$ ]]; then
            echo "Release $RELEASE_TAG (version $VERSION_ONLY) is a minor version. Proceeding to create backport label."

            # Extract X.Y from X.Y.0 (e.g., 5.6 from 5.6.0)
            MAJOR="${BASH_REMATCH[1]}"
            MINOR="${BASH_REMATCH[2]}"
            TWO_DIGIT_VERSION="${MAJOR}.${MINOR}"
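For readers unfamiliar with the `BASH_REMATCH` array used above, a minimal standalone sketch (illustration only, not part of the diff):

```bash
# After a successful `=~` match, BASH_REMATCH[0] holds the whole match and
# BASH_REMATCH[1], BASH_REMATCH[2], ... hold the parenthesized capture groups.
if [[ "5.6.0" =~ ^([0-9]+)\.([0-9]+)\.0$ ]]; then
  echo "${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"   # prints "5.6"
fi
```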
@@ -62,7 +59,6 @@ jobs:
            echo "Label name: $LABEL_NAME"
            echo "Label description: $LABEL_DESC"

            # Check if label already exists
            if gh label list --repo ${{ github.repository }} --limit 1000 | grep -q "^${LABEL_NAME}[[:space:]]"; then
              echo "Label '$LABEL_NAME' already exists."
            else
@@ -37,10 +37,13 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 0
          # PRs only need the diff range; push to master/release walks the new range from event.before.
          # 50 is enough headroom for the longest realistic PR/push chain without paying for a full clone.
          fetch-depth: 50
          persist-credentials: false

      - name: Scan for secrets with TruffleHog
      - name: Scan diff for secrets with TruffleHog
        # Action auto-injects --since-commit/--branch from event payload; passing them in extra_args produces duplicate flags.
        uses: trufflesecurity/trufflehog@ef6e76c3c4023279497fab4721ffa071a722fd05 # v3.92.4
        with:
          extra_args: '--results=verified,unknown'
          extra_args: --results=verified,unknown
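The comment above explains why `--since-commit`/`--branch` must not be repeated in `extra_args`. As a rough sketch (an assumption about the action's behavior, not taken from the diff), the action ends up invoking something like the following, with placeholders filled in from the event payload:

```bash
# <base-sha> and <head-ref> are hypothetical placeholders; the action injects
# the real values itself, which is why extra_args must not repeat these flags.
trufflehog git file://. \
  --since-commit "<base-sha>" \
  --branch "<head-ref>" \
  --results=verified,unknown
```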
@@ -62,7 +62,7 @@ jobs:
            "Alan-TheGentleman"
            "alejandrobailo"
            "amitsharm"
            "andoniaf"
            # "andoniaf"
            "cesararroba"
            "danibarranqueroo"
            "HugoPBrito"
@@ -152,7 +152,7 @@ jobs:
            org.opencontainers.image.created=${{ github.event_name == 'release' && github.event.release.published_at || github.event.head_commit.timestamp }}
            ${{ github.event_name == 'release' && format('org.opencontainers.image.version={0}', env.RELEASE_TAG) || '' }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=${{ github.event_name == 'pull_request' && 'min' || 'max' }},scope=${{ matrix.arch }}

  # Create and push multi-architecture manifest
  create-manifest:
@@ -5,10 +5,16 @@ on:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'mcp_server/**'
      - '.github/workflows/mcp-container-checks.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'mcp_server/**'
      - '.github/workflows/mcp-container-checks.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -56,16 +62,7 @@ jobs:

  mcp-container-build-and-scan:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
@@ -112,23 +109,22 @@ jobs:
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0

      - name: Build MCP container for ${{ matrix.arch }}
      - name: Build MCP container
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
        with:
          context: ${{ env.MCP_WORKING_DIR }}
          push: false
          load: true
          platforms: ${{ matrix.platform }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=${{ github.event_name == 'pull_request' && 'min' || 'max' }}

      - name: Scan MCP container with Trivy for ${{ matrix.arch }}
      - name: Scan MCP container with Trivy
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}-${{ matrix.arch }}
          image-tag: ${{ github.sha }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
@@ -86,11 +86,32 @@ jobs:
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      # The MCP server version (mcp_server/pyproject.toml) is decoupled from the Prowler release
      # version: it only changes when MCP code changes. mcp-bump-version.yml normally keeps it in
      # sync with mcp_server/CHANGELOG.md, but this publish workflow still runs on every release.
      # Pre-flight PyPI check covers the legitimate "no MCP changes for this release" case (and any
      # workflow_dispatch re-runs) without failing with HTTP 400 (version exists).
      - name: Check if prowler-mcp version already exists on PyPI
        id: pypi-check
        working-directory: ${{ env.WORKING_DIRECTORY }}
        run: |
          MCP_VERSION=$(grep '^version' pyproject.toml | head -1 | sed -E 's/^version[[:space:]]*=[[:space:]]*"([^"]+)".*/\1/')
          echo "mcp_version=${MCP_VERSION}" >> "$GITHUB_OUTPUT"
          if curl -fsS "https://pypi.org/pypi/prowler-mcp/${MCP_VERSION}/json" >/dev/null 2>&1; then
            echo "skip=true" >> "$GITHUB_OUTPUT"
            echo "::notice title=Skipping prowler-mcp publish::Version ${MCP_VERSION} already exists on PyPI; bump mcp_server/pyproject.toml to publish a new release."
          else
            echo "skip=false" >> "$GITHUB_OUTPUT"
            echo "::notice title=Publishing prowler-mcp::Version ${MCP_VERSION} not on PyPI yet; proceeding."
          fi

      - name: Build prowler-mcp package
        if: steps.pypi-check.outputs.skip != 'true'
        working-directory: ${{ env.WORKING_DIRECTORY }}
        run: uv build

      - name: Publish prowler-mcp package to PyPI
        if: steps.pypi-check.outputs.skip != 'true'
        uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0
        with:
          packages-dir: ${{ env.WORKING_DIRECTORY }}/dist/
@@ -0,0 +1,98 @@
name: 'Nightly: ARM64 Container Builds'

# Mitigation for amd64-only PR container-checks: build amd64+arm64 nightly against
# master to keep arm-specific Dockerfile regressions caught quickly. Build only —
# no push, no Trivy (weekly checks already cover that).

on:
  schedule:
    - cron: '0 4 * * *'
  workflow_dispatch: {}

concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: false

permissions: {}

jobs:
  build-arm64:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-24.04-arm
    timeout-minutes: 60
    permissions:
      contents: read
    strategy:
      fail-fast: false
      matrix:
        include:
          - component: sdk
            context: .
            dockerfile: ./Dockerfile
            image_name: prowler
          - component: api
            context: ./api
            dockerfile: ./api/Dockerfile
            image_name: prowler-api
          - component: ui
            context: ./ui
            dockerfile: ./ui/Dockerfile
            image_name: prowler-ui
            target: prod
            build_args: |
              NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=pk_test_51LwpXXXX
          - component: mcp
            context: ./mcp_server
            dockerfile: ./mcp_server/Dockerfile
            image_name: prowler-mcp

    steps:
      - name: Harden the runner (Audit all outbound calls)
        uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
        with:
          egress-policy: audit

      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          persist-credentials: false

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0

      - name: Build ${{ matrix.component }} container (linux/arm64)
        uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
        with:
          context: ${{ matrix.context }}
          file: ${{ matrix.dockerfile }}
          target: ${{ matrix.target }}
          push: false
          load: false
          platforms: linux/arm64
          tags: ${{ matrix.image_name }}:nightly-arm64
          build-args: ${{ matrix.build_args }}
          cache-from: type=gha,scope=arm64
          cache-to: type=gha,mode=min,scope=arm64

  notify-failure:
    needs: build-arm64
    if: failure() && github.event_name == 'schedule'
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read
    steps:
      - name: Harden the runner (Audit all outbound calls)
        uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
        with:
          egress-policy: audit

      - name: Notify Slack on failure
        uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
        with:
          method: chat.postMessage
          token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload: |
            channel: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
            text: ":rotating_light: Nightly arm64 container build failed for prowler — <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|view run>"
          errors: true
@@ -41,10 +41,15 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 0
          fetch-depth: 1
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Fetch PR base ref for tj-actions/changed-files
        env:
          BASE_REF: ${{ github.event.pull_request.base.ref }}
        run: git fetch --depth=1 origin "${BASE_REF}"

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
@@ -45,10 +45,15 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 0
          fetch-depth: 1
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Fetch PR base ref for tj-actions/changed-files
        env:
          BASE_REF: ${{ github.event.pull_request.base.ref }}
        run: git fetch --depth=1 origin "${BASE_REF}"

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
@@ -36,8 +36,14 @@ jobs:
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
          persist-credentials: false
          fetch-depth: 1
          # zizmor: ignore[artipacked]
          persist-credentials: true # Required by tj-actions/changed-files to fetch PR branch

      - name: Fetch PR base ref for tj-actions/changed-files
        env:
          BASE_REF: ${{ github.event.pull_request.base.ref }}
        run: git fetch --depth=1 origin "${BASE_REF}"

      - name: Get changed files
        id: changed-files
@@ -5,6 +5,9 @@ on:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'tests/providers/**/*_test.py'
      - '.github/workflows/sdk-check-duplicate-test-names.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -3,9 +3,7 @@ name: 'SDK: Container Build and Push'
on:
  push:
    branches:
      - 'v3' # For v3-latest
      - 'v4.6' # For v4-latest
      - 'master' # For latest
      - 'master'
    paths-ignore:
      - '.github/**'
      - '!.github/workflows/sdk-container-build-push.yml'
@@ -56,7 +54,6 @@ jobs:
    timeout-minutes: 5
    outputs:
      prowler_version: ${{ steps.get-prowler-version.outputs.prowler_version }}
      prowler_version_major: ${{ steps.get-prowler-version.outputs.prowler_version_major }}
      latest_tag: ${{ steps.get-prowler-version.outputs.latest_tag }}
      stable_tag: ${{ steps.get-prowler-version.outputs.stable_tag }}
    permissions:
@@ -92,32 +89,13 @@ jobs:
          PROWLER_VERSION="$(poetry version -s 2>/dev/null)"
          echo "prowler_version=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"

          # Extract major version
          PROWLER_VERSION_MAJOR="${PROWLER_VERSION%%.*}"
          echo "prowler_version_major=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_OUTPUT}"

          # Set version-specific tags
          case ${PROWLER_VERSION_MAJOR} in
            3)
              echo "latest_tag=v3-latest" >> "${GITHUB_OUTPUT}"
              echo "stable_tag=v3-stable" >> "${GITHUB_OUTPUT}"
              echo "✓ Prowler v3 detected - tags: v3-latest, v3-stable"
              ;;
            4)
              echo "latest_tag=v4-latest" >> "${GITHUB_OUTPUT}"
              echo "stable_tag=v4-stable" >> "${GITHUB_OUTPUT}"
              echo "✓ Prowler v4 detected - tags: v4-latest, v4-stable"
              ;;
            5)
              echo "latest_tag=latest" >> "${GITHUB_OUTPUT}"
              echo "stable_tag=stable" >> "${GITHUB_OUTPUT}"
              echo "✓ Prowler v5 detected - tags: latest, stable"
              ;;
            *)
              echo "::error::Unsupported Prowler major version: ${PROWLER_VERSION_MAJOR}"
              exit 1
              ;;
          esac
          if [[ "${PROWLER_VERSION_MAJOR}" != "5" ]]; then
            echo "::error::Unsupported Prowler major version: ${PROWLER_VERSION_MAJOR}"
            exit 1
          fi
          echo "latest_tag=latest" >> "${GITHUB_OUTPUT}"
          echo "stable_tag=stable" >> "${GITHUB_OUTPUT}"

  notify-release-started:
    if: github.repository == 'prowler-cloud/prowler' && (github.event_name == 'release' || github.event_name == 'workflow_dispatch')
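The `%%.*` expansion above does the major-version extraction; a one-line sketch for reference (illustration only):

```bash
PROWLER_VERSION="5.26.0"
# `${var%%.*}` deletes the longest suffix matching `.*`, i.e. everything from
# the first dot onward, leaving just the major version.
echo "${PROWLER_VERSION%%.*}"   # prints "5"
```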
@@ -228,7 +206,7 @@ jobs:
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.latest_tag }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=${{ github.event_name == 'pull_request' && 'min' || 'max' }},scope=${{ matrix.arch }}

  # Create and push multi-architecture manifest
  create-manifest:
@@ -386,39 +364,3 @@ jobs:
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.outcome.outputs.outcome }}
          update-ts: ${{ needs.notify-release-started.outputs.message-ts }}

  dispatch-v3-deployment:
    needs: [setup, container-build-push]
    if: always() && needs.setup.outputs.prowler_version_major == '3' && needs.setup.result == 'success' && needs.container-build-push.result == 'success'
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read

    steps:
      - name: Harden the runner (Audit all outbound calls)
        uses: step-security/harden-runner@fa2e9d605c4eeb9fcad4c99c224cee0c6c7f3594 # v2.16.0
        with:
          egress-policy: audit

      - name: Calculate short SHA
        id: short-sha
        run: echo "short_sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT

      - name: Dispatch v3 deployment (latest)
        if: github.event_name == 'push'
        uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
          event-type: dispatch
          client-payload: '{"version":"v3-latest","tag":"${{ steps.short-sha.outputs.short_sha }}"}'

      - name: Dispatch v3 deployment (release)
        if: github.event_name == 'release'
        uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
          event-type: dispatch
          client-payload: '{"version":"release","tag":"${{ needs.setup.outputs.prowler_version }}"}'
@@ -5,10 +5,22 @@ on:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'Dockerfile*'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-container-checks.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'Dockerfile*'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-container-checks.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -56,16 +68,7 @@ jobs:

  sdk-container-build-and-scan:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
@@ -132,23 +135,22 @@ jobs:
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0

      - name: Build SDK container for ${{ matrix.arch }}
      - name: Build SDK container
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
        with:
          context: .
          push: false
          load: true
          platforms: ${{ matrix.platform }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=${{ github.event_name == 'pull_request' && 'min' || 'max' }}

      - name: Scan SDK container with Trivy for ${{ matrix.arch }}
      - name: Scan SDK container with Trivy
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}-${{ matrix.arch }}
          image-tag: ${{ github.sha }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
@@ -5,10 +5,26 @@ on:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-tests.yml'
      - '.github/workflows/sdk-security.yml'
      - '.github/actions/setup-python-poetry/**'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-tests.yml'
      - '.github/workflows/sdk-security.yml'
      - '.github/actions/setup-python-poetry/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -209,11 +209,11 @@ jobs:
          echo "AWS service_paths='${STEPS_AWS_SERVICES_OUTPUTS_SERVICE_PATHS}'"

          if [ "${STEPS_AWS_SERVICES_OUTPUTS_RUN_ALL}" = "true" ]; then
            poetry run pytest -p no:randomly -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
            poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws
          elif [ -z "${STEPS_AWS_SERVICES_OUTPUTS_SERVICE_PATHS}" ]; then
            echo "No AWS service paths detected; skipping AWS tests."
          else
            poetry run pytest -p no:randomly -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml ${STEPS_AWS_SERVICES_OUTPUTS_SERVICE_PATHS}
            poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml ${STEPS_AWS_SERVICES_OUTPUTS_SERVICE_PATHS}
          fi
        env:
          STEPS_AWS_SERVICES_OUTPUTS_RUN_ALL: ${{ steps.aws-services.outputs.run_all }}
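Context for the pytest change above: `-p no:<plugin>` disables a plugin for a single invocation, so dropping `-p no:randomly` re-enables pytest-randomly's test-order shuffling (assuming the plugin is installed in the Poetry environment). Illustration only:

```bash
# Plugin explicitly disabled: tests run in declaration order.
poetry run pytest -p no:randomly tests/providers/aws
# Plugin active: pytest-randomly shuffles test order each run.
poetry run pytest tests/providers/aws
```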
@@ -151,7 +151,7 @@ jobs:
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=${{ github.event_name == 'pull_request' && 'min' || 'max' }},scope=${{ matrix.arch }}

  # Create and push multi-architecture manifest
  create-manifest:
@@ -5,10 +5,16 @@ on:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'ui/**'
      - '.github/workflows/ui-container-checks.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'ui/**'
      - '.github/workflows/ui-container-checks.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -57,16 +63,7 @@ jobs:

  ui-container-build-and-scan:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
            arch: amd64
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
            arch: arm64
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
@@ -114,7 +111,7 @@ jobs:
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0

      - name: Build UI container for ${{ matrix.arch }}
      - name: Build UI container
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0
        with:
@@ -122,18 +119,17 @@ jobs:
          target: prod
          push: false
          load: true
          platforms: ${{ matrix.platform }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=${{ github.event_name == 'pull_request' && 'min' || 'max' }}
          build-args: |
            NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=pk_test_51LwpXXXX

      - name: Scan UI container with Trivy for ${{ matrix.arch }}
      - name: Scan UI container with Trivy
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}-${{ matrix.arch }}
          image-tag: ${{ github.sha }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
@@ -15,6 +15,10 @@ on:
      - 'ui/**'
      - 'api/**' # API changes can affect UI E2E

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

permissions: {}

jobs:

@@ -266,7 +270,7 @@ jobs:
        with:
          name: playwright-report
          path: ui/playwright-report/
          retention-days: 30
          retention-days: 7

      - name: Cleanup services
        if: always()
@@ -8,6 +8,7 @@ rules:
  - docs-bump-version.yml
  - issue-triage.lock.yml
  - mcp-container-build-push.yml
  - nightly-arm64-container-builds.yml
  - pr-merged.yml
  - prepare-release.yml
  - sdk-bump-version.yml
+39 -18
@@ -1,17 +1,34 @@
# Priority tiers (lower = runs first, same priority = concurrent):
#   P0 — fast file fixers
#   P10 — validators and guards
#   P20 — auto-formatters
#   P30 — linters
#   P40 — security scanners
#   P50 — dependency validation

default_install_hook_types: [pre-commit]

repos:
  ## GENERAL (prek built-in — no external repo needed)
  - repo: builtin
    hooks:
      - id: check-merge-conflict
        priority: 10
      - id: check-yaml
        args: ["--allow-multiple-documents"]
        exclude: (prowler/config/llm_config.yaml|contrib/)
        priority: 10
      - id: check-json
        priority: 10
      - id: end-of-file-fixer
        priority: 0
      - id: trailing-whitespace
        priority: 0
      - id: no-commit-to-branch
        priority: 10
      - id: pretty-format-json
        args: ["--autofix", --no-sort-keys, --no-ensure-ascii]
        priority: 10

  ## TOML
  - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks

@@ -20,6 +37,7 @@ repos:
      - id: pretty-format-toml
        args: [--autofix]
        files: pyproject.toml
        priority: 20

  ## GITHUB ACTIONS
  - repo: https://github.com/zizmorcore/zizmor-pre-commit

@@ -27,6 +45,7 @@ repos:
    hooks:
      - id: zizmor
        files: ^\.github/
        priority: 30

  ## BASH
  - repo: https://github.com/koalaman/shellcheck-precommit

@@ -34,6 +53,7 @@ repos:
    hooks:
      - id: shellcheck
        exclude: contrib
        priority: 30

  ## PYTHON — SDK (prowler/, tests/, dashboard/, util/, scripts/)
  - repo: https://github.com/myint/autoflake

@@ -42,12 +62,8 @@ repos:
      - id: autoflake
        name: "SDK - autoflake"
        files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
        args:
          [
            "--in-place",
            "--remove-all-unused-imports",
            "--remove-unused-variable",
          ]
        args: ["--in-place", "--remove-all-unused-imports", "--remove-unused-variable"]
        priority: 20

  - repo: https://github.com/pycqa/isort
    rev: 8.0.1

@@ -56,6 +72,7 @@ repos:
        name: "SDK - isort"
        files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
        args: ["--profile", "black"]
        priority: 20

  - repo: https://github.com/psf/black
    rev: 26.3.1

@@ -63,6 +80,7 @@ repos:
      - id: black
        name: "SDK - black"
        files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
        priority: 20

  - repo: https://github.com/pycqa/flake8
    rev: 7.3.0

@@ -71,6 +89,7 @@ repos:
        name: "SDK - flake8"
        files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
        args: ["--ignore=E266,W503,E203,E501,W605"]
        priority: 30

  ## PYTHON — API + MCP Server (ruff)
  - repo: https://github.com/astral-sh/ruff-pre-commit

@@ -80,9 +99,11 @@ repos:
        name: "API + MCP - ruff check"
        files: { glob: ["{api,mcp_server}/**/*.py"] }
        args: ["--fix"]
        priority: 30
      - id: ruff-format
        name: "API + MCP - ruff format"
        files: { glob: ["{api,mcp_server}/**/*.py"] }
        priority: 20

  ## PYTHON — Poetry
  - repo: https://github.com/python-poetry/poetry

@@ -93,24 +114,28 @@ repos:
        args: ["--directory=./api"]
        files: { glob: ["api/{pyproject.toml,poetry.lock}"] }
        pass_filenames: false
        priority: 50

      - id: poetry-lock
        name: API - poetry-lock
        args: ["--directory=./api"]
        files: { glob: ["api/{pyproject.toml,poetry.lock}"] }
        pass_filenames: false
        priority: 50

      - id: poetry-check
        name: SDK - poetry-check
        args: ["--directory=./"]
        files: { glob: ["{pyproject.toml,poetry.lock}"] }
        pass_filenames: false
        priority: 50

      - id: poetry-lock
        name: SDK - poetry-lock
        args: ["--directory=./"]
        files: { glob: ["{pyproject.toml,poetry.lock}"] }
        pass_filenames: false
        priority: 50

  ## CONTAINERS
  - repo: https://github.com/hadolint/hadolint

@@ -118,6 +143,7 @@ repos:
    hooks:
      - id: hadolint
        args: ["--ignore=DL3013"]
        priority: 30

  ## LOCAL HOOKS
  - repo: local

@@ -128,6 +154,7 @@ repos:
        language: system
        types: [python]
        files: { glob: ["{prowler,tests,dashboard,util,scripts}/**/*.py"] }
        priority: 30

      - id: trufflehog
        name: TruffleHog

@@ -138,6 +165,7 @@ repos:
        language: system
        pass_filenames: false
        stages: ["pre-commit", "pre-push"]
        priority: 40

      - id: bandit
        name: bandit

@@ -146,8 +174,8 @@ repos:
        language: system
        types: [python]
        files: '.*\.py'
        exclude:
          { glob: ["{contrib,skills}/**", "**/.venv/**", "**/*_test.py"] }
        exclude: { glob: ["{contrib,skills}/**", "**/.venv/**", "**/*_test.py"] }
        priority: 40

      - id: safety
        name: safety

@@ -156,16 +184,8 @@ repos:
        entry: safety check --policy-file .safety-policy.yml
        language: system
        pass_filenames: false
        files:
          {
            glob:
              [
                "**/pyproject.toml",
                "**/poetry.lock",
                "**/requirements*.txt",
                ".safety-policy.yml",
              ],
          }
        files: { glob: ["**/pyproject.toml", "**/poetry.lock", "**/requirements*.txt", ".safety-policy.yml"] }
        priority: 40

      - id: vulture
        name: vulture

@@ -174,3 +194,4 @@ repos:
        language: system
        types: [python]
        files: '.*\.py'
        priority: 40
+1 -1
@@ -6,7 +6,7 @@ LABEL org.opencontainers.image.source="https://github.com/prowler-cloud/prowler"
ARG POWERSHELL_VERSION=7.5.0
ENV POWERSHELL_VERSION=${POWERSHELL_VERSION}

ARG TRIVY_VERSION=0.69.2
ARG TRIVY_VERSION=0.70.0
ENV TRIVY_VERSION=${TRIVY_VERSION}

ARG ZIZMOR_VERSION=1.24.1
@@ -104,22 +104,22 @@ Every AWS provider scan will enqueue an Attack Paths ingestion job automatically

| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) | Support | Interface |
|---|---|---|---|---|---|---|
| AWS | 572 | 83 | 41 | 17 | Official | UI, API, CLI |
| Azure | 165 | 20 | 18 | 13 | Official | UI, API, CLI |
| GCP | 100 | 13 | 15 | 11 | Official | UI, API, CLI |
| Kubernetes | 83 | 7 | 7 | 9 | Official | UI, API, CLI |
| GitHub | 21 | 2 | 1 | 2 | Official | UI, API, CLI |
| M365 | 89 | 9 | 4 | 5 | Official | UI, API, CLI |
| OCI | 48 | 13 | 3 | 10 | Official | UI, API, CLI |
| Alibaba Cloud | 61 | 9 | 3 | 9 | Official | UI, API, CLI |
| Cloudflare | 29 | 2 | 0 | 5 | Official | UI, API, CLI |
| AWS | 595 | 84 | 43 | 17 | Official | UI, API, CLI |
| Azure | 167 | 22 | 19 | 16 | Official | UI, API, CLI |
| GCP | 102 | 18 | 17 | 12 | Official | UI, API, CLI |
| Kubernetes | 83 | 7 | 7 | 11 | Official | UI, API, CLI |
| GitHub | 24 | 3 | 1 | 5 | Official | UI, API, CLI |
| M365 | 101 | 10 | 4 | 10 | Official | UI, API, CLI |
| OCI | 51 | 14 | 4 | 10 | Official | UI, API, CLI |
| Alibaba Cloud | 61 | 9 | 4 | 9 | Official | UI, API, CLI |
| Cloudflare | 29 | 3 | 0 | 5 | Official | UI, API, CLI |
| IaC | [See `trivy` docs.](https://trivy.dev/latest/docs/coverage/iac/) | N/A | N/A | N/A | Official | UI, API, CLI |
| MongoDB Atlas | 10 | 3 | 0 | 8 | Official | UI, API, CLI |
| LLM | [See `promptfoo` docs.](https://www.promptfoo.dev/docs/red-team/plugins/) | N/A | N/A | N/A | Official | CLI |
| Image | N/A | N/A | N/A | N/A | Official | CLI, API |
| Google Workspace | 1 | 1 | 0 | 1 | Official | CLI |
| OpenStack | 27 | 4 | 0 | 8 | Official | UI, API, CLI |
| Vercel | 30 | 6 | 0 | 5 | Official | CLI |
| Google Workspace | 25 | 4 | 2 | 4 | Official | CLI |
| OpenStack | 34 | 5 | 0 | 9 | Official | UI, API, CLI |
| Vercel | 26 | 6 | 0 | 5 | Official | CLI |
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |

> [!Note]
+24 -1
@@ -2,6 +2,29 @@

All notable changes to the **Prowler API** are documented in this file.

## [1.27.0] (Prowler UNRELEASED)

### 🚀 Added

- New `scan-reset-ephemeral-resources` post-scan task zeroes `failed_findings_count` for resources missing from the latest full-scope scan, keeping ephemeral resources from polluting the Resources page sort [(#10929)](https://github.com/prowler-cloud/prowler/pull/10929)
- ASD Essential Eight (AWS) compliance framework support [(#10982)](https://github.com/prowler-cloud/prowler/pull/10982)
- `scan-reset-ephemeral-resources` post-scan task zeroes `failed_findings_count` for resources missing from the latest full-scope scan, keeping ephemeral resources from polluting the Resources page sort [(#10929)](https://github.com/prowler-cloud/prowler/pull/10929)

### 🔐 Security

- `trivy` binary from 0.69.2 to 0.70.0 and `cryptography` from 46.0.6 to 46.0.7 (transitive via prowler SDK) in the API image for CVE-2026-33186 and CVE-2026-39892 [(#10978)](https://github.com/prowler-cloud/prowler/pull/10978)

---

## [1.26.1] (Prowler v5.25.1)

### 🐞 Fixed

- Attack Paths: AWS scans no longer fail when enabled regions cannot be retrieved, and scans stuck in `scheduled` state are now cleaned up after the stale threshold [(#10917)](https://github.com/prowler-cloud/prowler/pull/10917)
- Scan report and compliance downloads now redirect to a presigned S3 URL instead of streaming through the API worker, preventing gunicorn timeouts on large files [(#10927)](https://github.com/prowler-cloud/prowler/pull/10927)

---

## [1.26.0] (Prowler v5.25.0)

### 🚀 Added

@@ -12,7 +35,7 @@ All notable changes to the **Prowler API** are documented in this file.

### 🔄 Changed

- Allows tenant owners to expel users from their organizations [(#10787)](https://github.com/prowler-cloud/prowler/pull/10787)
- Allows tenant owners to expel users from their organizations [(#10787)](https://github.com/prowler-cloud/prowler/pull/10787)
- `aggregate_findings`, `aggregate_attack_surface`, `aggregate_scan_resource_group_summaries` and `aggregate_scan_category_summaries` now upsert via `bulk_create(update_conflicts=True, ...)` instead of the prior `ignore_conflicts=True` / plain INSERT / `already backfilled` short-circuit. Re-runs triggered by the post-mute reaggregation pipeline no longer trip the `unique_*_per_scan` constraints nor silently drop updates, and are race-safe under concurrent writers (e.g. scan completion overlapping with a fresh mute rule) [(#10843)](https://github.com/prowler-cloud/prowler/pull/10843)
- Rename the scan-category and scan-resource-group summary aggregators from `backfill_*` to `aggregate_*` [(#10843)](https://github.com/prowler-cloud/prowler/pull/10843)
+1 -1
@@ -5,7 +5,7 @@ LABEL maintainer="https://github.com/prowler-cloud/api"
ARG POWERSHELL_VERSION=7.5.0
ENV POWERSHELL_VERSION=${POWERSHELL_VERSION}

ARG TRIVY_VERSION=0.69.2
ARG TRIVY_VERSION=0.70.0
ENV TRIVY_VERSION=${TRIVY_VERSION}

ARG ZIZMOR_VERSION=1.24.1
Generated +61 -61
@@ -2504,61 +2504,61 @@ dev = ["bandit", "coverage", "flake8", "pydocstyle", "pylint", "pytest", "pytest

[[package]]
name = "cryptography"
version = "46.0.6"
version = "46.0.7"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
groups = ["main", "dev"]
files = [
    {file = "cryptography-46.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:64235194bad039a10bb6d2d930ab3323baaec67e2ce36215fd0952fad0930ca8"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:26031f1e5ca62fcb9d1fcb34b2b60b390d1aacaa15dc8b895a9ed00968b97b30"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9a693028b9cbe51b5a1136232ee8f2bc242e4e19d456ded3fa7c86e43c713b4a"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:67177e8a9f421aa2d3a170c3e56eca4e0128883cf52a071a7cbf53297f18b175"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:d9528b535a6c4f8ff37847144b8986a9a143585f0540fbcb1a98115b543aa463"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:22259338084d6ae497a19bae5d4c66b7ca1387d3264d1c2c0e72d9e9b6a77b97"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:760997a4b950ff00d418398ad73fbc91aa2894b5c1db7ccb45b4f68b42a63b3c"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3dfa6567f2e9e4c5dceb8ccb5a708158a2a871052fa75c8b78cb0977063f1507"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:cdcd3edcbc5d55757e5f5f3d330dd00007ae463a7e7aa5bf132d1f22a4b62b19"},
    {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:d4e4aadb7fc1f88687f47ca20bb7227981b03afaae69287029da08096853b738"},
    {file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2b417edbe8877cda9022dde3a008e2deb50be9c407eef034aeeb3a8b11d9db3c"},
    {file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:380343e0653b1c9d7e1f55b52aaa2dbb2fdf2730088d48c43ca1c7c0abb7cc2f"},
    {file = "cryptography-46.0.6-cp311-abi3-win32.whl", hash = "sha256:bcb87663e1f7b075e48c3be3ecb5f0b46c8fc50b50a97cf264e7f60242dca3f2"},
    {file = "cryptography-46.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:6739d56300662c468fddb0e5e291f9b4d084bead381667b9e654c7dd81705124"},
    {file = "cryptography-46.0.6-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:2ef9e69886cbb137c2aef9772c2e7138dc581fad4fcbcf13cc181eb5a3ab6275"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7f417f034f91dcec1cb6c5c35b07cdbb2ef262557f701b4ecd803ee8cefed4f4"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d24c13369e856b94892a89ddf70b332e0b70ad4a5c43cf3e9cb71d6d7ffa1f7b"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:aad75154a7ac9039936d50cf431719a2f8d4ed3d3c277ac03f3339ded1a5e707"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3c21d92ed15e9cfc6eb64c1f5a0326db22ca9c2566ca46d845119b45b4400361"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:4668298aef7cddeaf5c6ecc244c2302a2b8e40f384255505c22875eebb47888b"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8ce35b77aaf02f3b59c90b2c8a05c73bac12cea5b4e8f3fbece1f5fddea5f0ca"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c89eb37fae9216985d8734c1afd172ba4927f5a05cfd9bf0e4863c6d5465b013"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:ed418c37d095aeddf5336898a132fba01091f0ac5844e3e8018506f014b6d2c4"},
    {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:69cf0056d6947edc6e6760e5f17afe4bea06b56a9ac8a06de9d2bd6b532d4f3a"},
    {file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e7304c4f4e9490e11efe56af6713983460ee0780f16c63f219984dab3af9d2d"},
    {file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b928a3ca837c77a10e81a814a693f2295200adb3352395fad024559b7be7a736"},
    {file = "cryptography-46.0.6-cp314-cp314t-win32.whl", hash = "sha256:97c8115b27e19e592a05c45d0dd89c57f81f841cc9880e353e0d3bf25b2139ed"},
    {file = "cryptography-46.0.6-cp314-cp314t-win_amd64.whl", hash = "sha256:c797e2517cb7880f8297e2c0f43bb910e91381339336f75d2c1c2cbf811b70b4"},
    {file = "cryptography-46.0.6-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:12cae594e9473bca1a7aceb90536060643128bb274fcea0fc459ab90f7d1ae7a"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:639301950939d844a9e1c4464d7e07f902fe9a7f6b215bb0d4f28584729935d8"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ed3775295fb91f70b4027aeba878d79b3e55c0b3e97eaa4de71f8f23a9f2eb77"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8927ccfbe967c7df312ade694f987e7e9e22b2425976ddbf28271d7e58845290"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:b12c6b1e1651e42ab5de8b1e00dc3b6354fdfd778e7fa60541ddacc27cd21410"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:063b67749f338ca9c5a0b7fe438a52c25f9526b851e24e6c9310e7195aad3b4d"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:02fad249cb0e090b574e30b276a3da6a149e04ee2f049725b1f69e7b8351ec70"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7e6142674f2a9291463e5e150090b95a8519b2fb6e6aaec8917dd8d094ce750d"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:456b3215172aeefb9284550b162801d62f5f264a081049a3e94307fe20792cfa"},
    {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:341359d6c9e68834e204ceaf25936dffeafea3829ab80e9503860dcc4f4dac58"},
    {file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a9c42a2723999a710445bc0d974e345c32adfd8d2fac6d8a251fa829ad31cfb"},
    {file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6617f67b1606dfd9fe4dbfa354a9508d4a6d37afe30306fe6c101b7ce3274b72"},
    {file = "cryptography-46.0.6-cp38-abi3-win32.whl", hash = "sha256:7f6690b6c55e9c5332c0b59b9c8a3fb232ebf059094c17f9019a51e9827df91c"},
    {file = "cryptography-46.0.6-cp38-abi3-win_amd64.whl", hash = "sha256:79e865c642cfc5c0b3eb12af83c35c5aeff4fa5c672dc28c43721c2c9fdd2f0f"},
    {file = "cryptography-46.0.6-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:2ea0f37e9a9cf0df2952893ad145fd9627d326a59daec9b0802480fa3bcd2ead"},
    {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a3e84d5ec9ba01f8fd03802b2147ba77f0c8f2617b2aff254cedd551844209c8"},
    {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:12f0fa16cc247b13c43d56d7b35287ff1569b5b1f4c5e87e92cc4fcc00cd10c0"},
    {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:50575a76e2951fe7dbd1f56d181f8c5ceeeb075e9ff88e7ad997d2f42af06e7b"},
    {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:90e5f0a7b3be5f40c3a0a0eafb32c681d8d2c181fc2a1bdabe9b3f611d9f6b1a"},
    {file = "cryptography-46.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6728c49e3b2c180ef26f8e9f0a883a2c585638db64cf265b49c9ba10652d430e"},
    {file = "cryptography-46.0.6.tar.gz", hash = "sha256:27550628a518c5c6c903d84f637fbecf287f6cb9ced3804838a1295dc1fd0759"},
    {file = "cryptography-46.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:ea42cbe97209df307fdc3b155f1b6fa2577c0defa8f1f7d3be7d31d189108ad4"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b36a4695e29fe69215d75960b22577197aca3f7a25b9cf9d165dcfe9d80bc325"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ad9ef796328c5e3c4ceed237a183f5d41d21150f972455a9d926593a1dcb308"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:73510b83623e080a2c35c62c15298096e2a5dc8d51c3b4e1740211839d0dea77"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:cbd5fb06b62bd0721e1170273d3f4d5a277044c47ca27ee257025146c34cbdd1"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:420b1e4109cc95f0e5700eed79908cef9268265c773d3a66f7af1eef53d409ef"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:24402210aa54baae71d99441d15bb5a1919c195398a87b563df84468160a65de"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8a469028a86f12eb7d2fe97162d0634026d92a21f3ae0ac87ed1c4a447886c83"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9694078c5d44c157ef3162e3bf3946510b857df5a3955458381d1c7cfc143ddb"},
    {file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:42a1e5f98abb6391717978baf9f90dc28a743b7d9be7f0751a6f56a75d14065b"},
    {file = "cryptography-46.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91bbcb08347344f810cbe49065914fe048949648f6bd5c2519f34619142bbe85"},
    {file = "cryptography-46.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5d1c02a14ceb9148cc7816249f64f623fbfee39e8c03b3650d842ad3f34d637e"},
    {file = "cryptography-46.0.7-cp311-abi3-win32.whl", hash = "sha256:d23c8ca48e44ee015cd0a54aeccdf9f09004eba9fc96f38c911011d9ff1bd457"},
    {file = "cryptography-46.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:397655da831414d165029da9bc483bed2fe0e75dde6a1523ec2fe63f3c46046b"},
    {file = "cryptography-46.0.7-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:d151173275e1728cf7839aaa80c34fe550c04ddb27b34f48c232193df8db5842"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:db0f493b9181c7820c8134437eb8b0b4792085d37dbb24da050476ccb664e59c"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ebd6daf519b9f189f85c479427bbd6e9c9037862cf8fe89ee35503bd209ed902"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:b7b412817be92117ec5ed95f880defe9cf18a832e8cafacf0a22337dc1981b4d"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:fbfd0e5f273877695cb93baf14b185f4878128b250cc9f8e617ea0c025dfb022"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:ffca7aa1d00cf7d6469b988c581598f2259e46215e0140af408966a24cf086ce"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:60627cf07e0d9274338521205899337c5d18249db56865f943cbe753aa96f40f"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:80406c3065e2c55d7f49a9550fe0c49b3f12e5bfff5dedb727e319e1afb9bf99"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:c5b1ccd1239f48b7151a65bc6dd54bcfcc15e028c8ac126d3fada09db0e07ef1"},
    {file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:d5f7520159cd9c2154eb61eb67548ca05c5774d39e9c2c4339fd793fe7d097b2"},
    {file = "cryptography-46.0.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:fcd8eac50d9138c1d7fc53a653ba60a2bee81a505f9f8850b6b2888555a45d0e"},
    {file = "cryptography-46.0.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:65814c60f8cc400c63131584e3e1fad01235edba2614b61fbfbfa954082db0ee"},
    {file = "cryptography-46.0.7-cp314-cp314t-win32.whl", hash = "sha256:fdd1736fed309b4300346f88f74cd120c27c56852c3838cab416e7a166f67298"},
    {file = "cryptography-46.0.7-cp314-cp314t-win_amd64.whl", hash = "sha256:e06acf3c99be55aa3b516397fe42f5855597f430add9c17fa46bf2e0fb34c9bb"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:462ad5cb1c148a22b2e3bcc5ad52504dff325d17daf5df8d88c17dda1f75f2a4"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:84d4cced91f0f159a7ddacad249cc077e63195c36aac40b4150e7a57e84fffe7"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:128c5edfe5e5938b86b03941e94fac9ee793a94452ad1365c9fc3f4f62216832"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5e51be372b26ef4ba3de3c167cd3d1022934bc838ae9eaad7e644986d2a3d163"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:cdf1a610ef82abb396451862739e3fc93b071c844399e15b90726ef7470eeaf2"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1d25aee46d0c6f1a501adcddb2d2fee4b979381346a78558ed13e50aa8a59067"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:cdfbe22376065ffcf8be74dc9a909f032df19bc58a699456a21712d6e5eabfd0"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:abad9dac36cbf55de6eb49badd4016806b3165d396f64925bf2999bcb67837ba"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:935ce7e3cfdb53e3536119a542b839bb94ec1ad081013e9ab9b7cfd478b05006"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:35719dc79d4730d30f1c2b6474bd6acda36ae2dfae1e3c16f2051f215df33ce0"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:7bbc6ccf49d05ac8f7d7b5e2e2c33830d4fe2061def88210a126d130d7f71a85"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a1529d614f44b863a7b480c6d000fe93b59acee9c82ffa027cfadc77521a9f5e"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-win32.whl", hash = "sha256:f247c8c1a1fb45e12586afbb436ef21ff1e80670b2861a90353d9b025583d246"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-win_amd64.whl", hash = "sha256:506c4ff91eff4f82bdac7633318a526b1d1309fc07ca76a3ad182cb5b686d6d3"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:fc9ab8856ae6cf7c9358430e49b368f3108f050031442eaeb6b9d87e4dcf4e4f"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d3b99c535a9de0adced13d159c5a9cf65c325601aa30f4be08afd680643e9c15"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d02c738dacda7dc2a74d1b2b3177042009d5cab7c7079db74afc19e56ca1b455"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:04959522f938493042d595a736e7dbdff6eb6cc2339c11465b3ff89343b65f65"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:3986ac1dee6def53797289999eabe84798ad7817f3e97779b5061a95b0ee4968"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:258514877e15963bd43b558917bc9f54cf7cf866c38aa576ebf47a77ddbc43a4"},
|
||||
{file = "cryptography-46.0.7.tar.gz", hash = "sha256:e4cfd68c5f3e0bfdad0d38e023239b96a2fe84146481852dffbcca442c245aa5"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2571,7 +2571,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
|
||||
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
|
||||
sdist = ["build (>=1.0.0)"]
|
||||
ssh = ["bcrypt (>=3.1.5)"]
|
||||
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.6)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
||||
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.7)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
||||
test-randomorder = ["pytest-randomly"]
|
||||
|
||||
[[package]]
|
||||
@@ -6665,7 +6665,7 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "prowler"
|
||||
version = "5.25.0"
|
||||
version = "5.26.0"
|
||||
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
|
||||
optional = false
|
||||
python-versions = ">=3.10,<3.13"
|
||||
@@ -6720,7 +6720,7 @@ boto3 = "1.40.61"
|
||||
botocore = "1.40.61"
|
||||
cloudflare = "4.3.1"
|
||||
colorama = "0.4.6"
|
||||
cryptography = "46.0.6"
|
||||
cryptography = "46.0.7"
|
||||
dash = "3.1.1"
|
||||
dash-bootstrap-components = "2.0.3"
|
||||
defusedxml = "0.7.1"
|
||||
@@ -6755,7 +6755,7 @@ uuid6 = "2024.7.10"
|
||||
type = "git"
|
||||
url = "https://github.com/prowler-cloud/prowler.git"
|
||||
reference = "master"
|
||||
resolved_reference = "ca29e354b622198ff6a70e2ea5eb04e4a44a0903"
|
||||
resolved_reference = "16798e293da365965120961e6539e3a9756564f9"
|
||||
|
||||
[[package]]
|
||||
name = "psutil"
|
||||
@@ -7912,26 +7912,26 @@ shaping = ["uharfbuzz"]
|
||||
|
||||
[[package]]
|
||||
name = "requests"
|
||||
version = "2.32.5"
|
||||
version = "2.33.1"
|
||||
description = "Python HTTP for Humans."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main", "dev"]
|
||||
files = [
|
||||
{file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"},
|
||||
{file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"},
|
||||
{file = "requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a"},
|
||||
{file = "requests-2.33.1.tar.gz", hash = "sha256:18817f8c57c6263968bc123d237e3b8b08ac046f5456bd1e307ee8f4250d3517"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
certifi = ">=2017.4.17"
|
||||
certifi = ">=2023.5.7"
|
||||
charset_normalizer = ">=2,<4"
|
||||
idna = ">=2.5,<4"
|
||||
PySocks = {version = ">=1.5.6,<1.5.7 || >1.5.7", optional = true, markers = "extra == \"socks\""}
|
||||
urllib3 = ">=1.21.1,<3"
|
||||
urllib3 = ">=1.26,<3"
|
||||
|
||||
[package.extras]
|
||||
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
|
||||
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
|
||||
use-chardet-on-py3 = ["chardet (>=3.0.2,<8)"]
|
||||
|
||||
[[package]]
|
||||
name = "requests-file"
|
||||
|
||||
+2
-2
@@ -50,7 +50,7 @@ name = "prowler-api"
package-mode = false
# Needed for the SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.26.0"
version = "1.27.0"

[project.scripts]
celery = "src.backend.config.settings.celery"
@@ -63,6 +63,7 @@ docker = "7.1.0"
filelock = "3.20.3"
freezegun = "1.5.1"
mypy = "1.10.1"
prek = "0.3.9"
pylint = "3.2.5"
pytest = "9.0.3"
pytest-cov = "5.0.0"
@@ -74,4 +75,3 @@ ruff = "0.5.0"
safety = "3.7.0"
tqdm = "4.67.1"
vulture = "2.14"
prek = "0.3.9"

@@ -52,7 +52,7 @@ class ApiConfig(AppConfig):
            "check_and_fix_socialaccount_sites_migration",
        ]

        # Skip Neo4j initialization during tests, some Django commands, and Celery
        # Skip eager Neo4j init for tests, some Django commands, and Celery (prefork pool: driver must stay lazy, no post_fork hook)
        if getattr(settings, "TESTING", False) or (
            len(sys.argv) > 1
            and (
@@ -64,7 +64,7 @@ class ApiConfig(AppConfig):
            )
        ):
            logger.info(
                "Skipping Neo4j initialization because tests, some Django commands or Celery"
                "Skipping eager Neo4j init: tests, some Django commands, or Celery prefork pool (driver stays lazy)"
            )

        else:

@@ -595,10 +595,40 @@ class Scan(RowLevelSecurityProtectedModel):
    objects = ActiveProviderManager()
    all_objects = models.Manager()

    _SCOPING_SCANNER_ARG_KEYS_CACHE: tuple[str, ...] | None = None

    @classmethod
    def get_scoping_scanner_arg_keys(cls) -> tuple[str, ...]:
        """Return the scanner_args keys that mark a scan as scoped.

        Derived from ``prowler.lib.scan.scan.Scan.__init__`` so the API stays
        in sync with whatever the SDK actually accepts as filters. Cached at
        class level — the signature is stable for the process lifetime.
        """
        if cls._SCOPING_SCANNER_ARG_KEYS_CACHE is None:
            import inspect

            from prowler.lib.scan.scan import Scan as ProwlerScan

            params = inspect.signature(ProwlerScan.__init__).parameters
            cls._SCOPING_SCANNER_ARG_KEYS_CACHE = tuple(
                name for name in params if name not in ("self", "provider")
            )
        return cls._SCOPING_SCANNER_ARG_KEYS_CACHE

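    # Illustration only, with a hypothetical SDK signature (not this repo's):
    #
    #     import inspect
    #
    #     class FakeSDKScan:
    #         def __init__(self, provider, checks=None, services=None):
    #             ...
    #
    #     params = inspect.signature(FakeSDKScan.__init__).parameters
    #     tuple(n for n in params if n not in ("self", "provider"))
    #     # -> ("checks", "services")  (declaration order is preserved)
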
    class TriggerChoices(models.TextChoices):
        SCHEDULED = "scheduled", _("Scheduled")
        MANUAL = "manual", _("Manual")

    # Trigger values for scans that ran the SDK end-to-end. Imported scans (or
    # any future trigger) are intentionally NOT in this set — they may carry
    # only a partial slice of resources, so post-scan logic that depends on a
    # full-scope sweep (e.g. resetting ephemeral resource findings) must skip
    # them by default.
    LIVE_SCAN_TRIGGERS = frozenset(
        (TriggerChoices.SCHEDULED.value, TriggerChoices.MANUAL.value)
    )

    id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
    name = models.CharField(
        blank=True, null=True, max_length=100, validators=[MinLengthValidator(3)]
@@ -681,6 +711,24 @@ class Scan(RowLevelSecurityProtectedModel):
    class JSONAPIMeta:
        resource_name = "scans"

    def is_full_scope(self) -> bool:
        """Return True if this scan ran with no scoping filters at all.

        Used to gate post-scan operations (such as resetting the
        failed_findings_count of resources missing from the scan) that are only
        safe when the scan covered every check, service, and category. Imported
        scans are NOT full-scope by definition — they may carry only a partial
        slice of resources, so they're rejected via ``trigger`` even before the
        scanner_args check.
        """
        if self.trigger not in self.LIVE_SCAN_TRIGGERS:
            return False
        scanner_args = self.scanner_args or {}
        for key in self.get_scoping_scanner_arg_keys():
            if scanner_args.get(key):
                return False
        return True
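
    # Hedged usage sketch (field values are illustrative; the imported trigger
    # literal is whatever the model defines, it only matters that it is not in
    # LIVE_SCAN_TRIGGERS):
    #
    #     scan = Scan(trigger="manual", scanner_args={})
    #     scan.is_full_scope()  # True: live trigger, no scoping filters
    #
    #     scan = Scan(trigger="manual", scanner_args={"checks": ["some_check"]})
    #     scan.is_full_scope()  # False, assuming "checks" is one of the keys
    #                           # derived from the SDK signature
    #
    #     scan = Scan(trigger="imported", scanner_args={})
    #     scan.is_full_scope()  # False: rejected by trigger before scanner_args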


class AttackPathsScan(RowLevelSecurityProtectedModel):
    objects = ActiveProviderManager()

(test file diff suppressed because it is too large: +1620 -58)
@@ -3841,9 +3841,14 @@ class TestScanViewSet:
            "prowler-output-123_threatscore_report.pdf",
        )

        presigned_url = (
            "https://test-bucket.s3.amazonaws.com/"
            "tenant-id/scan-id/threatscore/prowler-output-123_threatscore_report.pdf"
            "?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Expires=300"
        )
        mock_s3_client = Mock()
        mock_s3_client.list_objects_v2.return_value = {"Contents": [{"Key": pdf_key}]}
        mock_s3_client.get_object.return_value = {"Body": io.BytesIO(b"pdf-bytes")}
        mock_s3_client.generate_presigned_url.return_value = presigned_url

        mock_env_str.return_value = bucket
        mock_get_s3_client.return_value = mock_s3_client
@@ -3852,19 +3857,26 @@ class TestScanViewSet:
        url = reverse("scan-threatscore", kwargs={"pk": scan.id})
        response = authenticated_client.get(url)

        assert response.status_code == status.HTTP_200_OK
        assert response["Content-Type"] == "application/pdf"
        assert response["Content-Disposition"].endswith(
            '"prowler-output-123_threatscore_report.pdf"'
        )
        assert response.content == b"pdf-bytes"
        assert response.status_code == status.HTTP_302_FOUND
        assert response["Location"] == presigned_url
        mock_s3_client.list_objects_v2.assert_called_once()
        mock_s3_client.get_object.assert_called_once_with(Bucket=bucket, Key=pdf_key)
        mock_s3_client.generate_presigned_url.assert_called_once_with(
            "get_object",
            Params={
                "Bucket": bucket,
                "Key": pdf_key,
                "ResponseContentDisposition": (
                    'attachment; filename="prowler-output-123_threatscore_report.pdf"'
                ),
                "ResponseContentType": "application/pdf",
            },
            ExpiresIn=300,
        )

    def test_report_s3_success(self, authenticated_client, scans_fixture, monkeypatch):
        """
        When output_location is an S3 URL and the S3 client returns the file successfully,
        the view should return the ZIP file with HTTP 200 and proper headers.
        When output_location is an S3 URL and the object exists,
        the view should return a 302 redirect to a presigned S3 URL.
        """
        scan = scans_fixture[0]
        bucket = "test-bucket"
@@ -3878,22 +3890,33 @@ class TestScanViewSet:
            type("env", (), {"str": lambda self, *args, **kwargs: "test-bucket"})(),
        )

        presigned_url = (
            "https://test-bucket.s3.amazonaws.com/report.zip"
            "?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Expires=300"
        )

        class FakeS3Client:
            def get_object(self, Bucket, Key):
            def head_object(self, Bucket, Key):
                assert Bucket == bucket
                assert Key == key
                return {"Body": io.BytesIO(b"s3 zip content")}
                return {}

            def generate_presigned_url(self, ClientMethod, Params, ExpiresIn):
                assert ClientMethod == "get_object"
                assert Params["Bucket"] == bucket
                assert Params["Key"] == key
                assert Params["ResponseContentDisposition"] == (
                    'attachment; filename="report.zip"'
                )
                assert ExpiresIn == 300
                return presigned_url

        monkeypatch.setattr("api.v1.views.get_s3_client", lambda: FakeS3Client())

        url = reverse("scan-report", kwargs={"pk": scan.id})
        response = authenticated_client.get(url)
        assert response.status_code == 200
        expected_filename = os.path.basename("report.zip")
        content_disposition = response.get("Content-Disposition")
        assert content_disposition.startswith('attachment; filename="')
        assert f'filename="{expected_filename}"' in content_disposition
        assert response.content == b"s3 zip content"
        assert response.status_code == status.HTTP_302_FOUND
        assert response["Location"] == presigned_url

    def test_report_s3_success_no_local_files(
        self, authenticated_client, scans_fixture, monkeypatch
@@ -4032,23 +4055,31 @@ class TestScanViewSet:
        )

        match_key = "path/compliance/mitre_attack_aws.csv"
        presigned_url = (
            "https://test-bucket.s3.amazonaws.com/path/compliance/mitre_attack_aws.csv"
            "?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Expires=300"
        )

        class FakeS3Client:
            def list_objects_v2(self, Bucket, Prefix):
                return {"Contents": [{"Key": match_key}]}

            def get_object(self, Bucket, Key):
                return {"Body": io.BytesIO(b"ignored")}
            def generate_presigned_url(self, ClientMethod, Params, ExpiresIn):
                assert ClientMethod == "get_object"
                assert Params["Key"] == match_key
                assert Params["ResponseContentDisposition"] == (
                    'attachment; filename="mitre_attack_aws.csv"'
                )
                assert ExpiresIn == 300
                return presigned_url

        monkeypatch.setattr("api.v1.views.get_s3_client", lambda: FakeS3Client())

        framework = match_key.split("/")[-1].split(".")[0]
        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": framework})
        resp = authenticated_client.get(url)
        assert resp.status_code == status.HTTP_200_OK
        cd = resp["Content-Disposition"]
        assert cd.startswith('attachment; filename="')
        assert cd.endswith('filename="mitre_attack_aws.csv"')
        assert resp.status_code == status.HTTP_302_FOUND
        assert resp["Location"] == presigned_url

    def test_compliance_s3_not_found(
        self, authenticated_client, scans_fixture, monkeypatch
@@ -4251,8 +4282,8 @@ class TestScanViewSet:
        scan.save()

        fake_client = MagicMock()
        fake_client.get_object.side_effect = ClientError(
            {"Error": {"Code": "NoSuchKey"}}, "GetObject"
        fake_client.head_object.side_effect = ClientError(
            {"Error": {"Code": "NoSuchKey"}}, "HeadObject"
        )
        mock_get_s3_client.return_value = fake_client

@@ -4275,8 +4306,8 @@ class TestScanViewSet:
        scan.save()

        fake_client = MagicMock()
        fake_client.get_object.side_effect = ClientError(
            {"Error": {"Code": "AccessDenied"}}, "GetObject"
        fake_client.head_object.side_effect = ClientError(
            {"Error": {"Code": "AccessDenied"}}, "HeadObject"
        )
        mock_get_s3_client.return_value = fake_client

@@ -53,7 +53,7 @@ from django.db.models import (
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast, Coalesce, RowNumber
from django.http import HttpResponse, QueryDict
from django.http import HttpResponse, HttpResponseBase, HttpResponseRedirect, QueryDict
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.dateparse import parse_date
@@ -422,7 +422,7 @@ class SchemaView(SpectacularAPIView):

    def get(self, request, *args, **kwargs):
        spectacular_settings.TITLE = "Prowler API"
        spectacular_settings.VERSION = "1.26.0"
        spectacular_settings.VERSION = "1.27.0"
        spectacular_settings.DESCRIPTION = (
            "Prowler API specification.\n\nThis file is auto-generated."
        )
@@ -2080,24 +2080,38 @@ class ScanViewSet(BaseRLSViewSet):
            },
        )

    def _load_file(self, path_pattern, s3=False, bucket=None, list_objects=False):
    def _load_file(
        self,
        path_pattern,
        s3=False,
        bucket=None,
        list_objects=False,
        content_type=None,
    ):
        """
        Loads a binary file (e.g., ZIP or CSV) and returns its content and filename.
        Resolve a report file location and return the bytes (filesystem) or a redirect (S3).

        Depending on the input parameters, this method supports loading:
        - From S3 using a direct key.
        - From S3 by listing objects under a prefix and matching suffix.
        - From the local filesystem using glob pattern matching.
        - From S3 using a direct key, returns a 302 to a short-lived presigned URL.
        - From S3 by listing objects under a prefix and matching suffix, returns a 302 to a short-lived presigned URL.
        - From the local filesystem using glob pattern matching, returns the file bytes.

        The S3 branch never streams bytes through the worker; this prevents gunicorn
        worker timeouts on large reports.

        Args:
            path_pattern (str): The key or glob pattern representing the file location.
            s3 (bool, optional): Whether the file is stored in S3. Defaults to None.
            bucket (str, optional): The name of the S3 bucket, required if `s3=True`. Defaults to None.
            list_objects (bool, optional): If True and `s3=True`, list objects by prefix to find the file. Defaults to False.
            content_type (str, optional): On the S3 branch, forwarded as `ResponseContentType`
                so the presigned download advertises the same Content-Type the API used to send.
                Ignored on the filesystem branch.

        Returns:
            tuple[bytes, str]: A tuple containing the file content as bytes and the filename if successful.
            Response: A DRF `Response` object with an appropriate status and error detail if an error occurs.
            tuple[bytes, str]: For the filesystem branch, the file content and filename.
            HttpResponseRedirect: For the S3 branch on success, a 302 redirect to a presigned `GetObject` URL.
            Response: For any error path, a DRF `Response` with an appropriate status and detail.
        """
        if s3:
            try:
@@ -2144,25 +2158,45 @@ class ScanViewSet(BaseRLSViewSet):
                    # path_pattern here is a prefix; the compliance view builds the correct suffix check beforehand
                key = keys[0]
            else:
                # path_pattern is exact key
                # path_pattern is exact key; HEAD before presigning to preserve the 404 contract.
                key = path_pattern
            try:
                s3_obj = client.get_object(Bucket=bucket, Key=key)
            except ClientError as e:
                code = e.response.get("Error", {}).get("Code")
                if code == "NoSuchKey":
            try:
                client.head_object(Bucket=bucket, Key=key)
            except ClientError as e:
                code = e.response.get("Error", {}).get("Code")
                if code in ("NoSuchKey", "404"):
                    return Response(
                        {
                            "detail": "The scan has no reports, or the report generation task has not started yet."
                        },
                        status=status.HTTP_404_NOT_FOUND,
                    )
                return Response(
                    {
                        "detail": "The scan has no reports, or the report generation task has not started yet."
                    },
                    status=status.HTTP_404_NOT_FOUND,
                    {"detail": "There is a problem with credentials."},
                    status=status.HTTP_403_FORBIDDEN,
                )
                return Response(
                    {"detail": "There is a problem with credentials."},
                    status=status.HTTP_403_FORBIDDEN,
                )
            content = s3_obj["Body"].read()

            filename = os.path.basename(key)
            # escape quotes and strip CR/LF so a malformed key cannot break out of the header
            safe_filename = (
                filename.replace("\\", "\\\\")
                .replace('"', '\\"')
                .replace("\r", "")
                .replace("\n", "")
            )
            params = {
                "Bucket": bucket,
                "Key": key,
                "ResponseContentDisposition": f'attachment; filename="{safe_filename}"',
            }
            if content_type:
                params["ResponseContentType"] = content_type
            url = client.generate_presigned_url(
                "get_object",
                Params=params,
                ExpiresIn=300,
            )
            return HttpResponseRedirect(url)
        else:
            files = glob.glob(path_pattern)
            if not files:
@@ -2205,12 +2239,16 @@ class ScanViewSet(BaseRLSViewSet):
            bucket = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET", "")
            key_prefix = scan.output_location.removeprefix(f"s3://{bucket}/")
            loader = self._load_file(
                key_prefix, s3=True, bucket=bucket, list_objects=False
                key_prefix,
                s3=True,
                bucket=bucket,
                list_objects=False,
                content_type="application/x-zip-compressed",
            )
        else:
            loader = self._load_file(scan.output_location, s3=False)

        if isinstance(loader, Response):
        if isinstance(loader, HttpResponseBase):
            return loader

        content, filename = loader
@@ -2248,13 +2286,19 @@ class ScanViewSet(BaseRLSViewSet):
            prefix = os.path.join(
                os.path.dirname(key_prefix), "compliance", f"{name}.csv"
            )
            loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
            loader = self._load_file(
                prefix,
                s3=True,
                bucket=bucket,
                list_objects=True,
                content_type="text/csv",
            )
        else:
            base = os.path.dirname(scan.output_location)
            pattern = os.path.join(base, "compliance", f"*_{name}.csv")
            loader = self._load_file(pattern, s3=False)

        if isinstance(loader, Response):
        if isinstance(loader, HttpResponseBase):
            return loader

        content, filename = loader
@@ -2287,13 +2331,19 @@ class ScanViewSet(BaseRLSViewSet):
                "cis",
                "*_cis_report.pdf",
            )
            loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
            loader = self._load_file(
                prefix,
                s3=True,
                bucket=bucket,
                list_objects=True,
                content_type="application/pdf",
            )
        else:
            base = os.path.dirname(scan.output_location)
            pattern = os.path.join(base, "cis", "*_cis_report.pdf")
            loader = self._load_file(pattern, s3=False)

        if isinstance(loader, Response):
        if isinstance(loader, HttpResponseBase):
            return loader

        content, filename = loader
@@ -2327,13 +2377,19 @@ class ScanViewSet(BaseRLSViewSet):
                "threatscore",
                "*_threatscore_report.pdf",
            )
            loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
            loader = self._load_file(
                prefix,
                s3=True,
                bucket=bucket,
                list_objects=True,
                content_type="application/pdf",
            )
        else:
            base = os.path.dirname(scan.output_location)
            pattern = os.path.join(base, "threatscore", "*_threatscore_report.pdf")
            loader = self._load_file(pattern, s3=False)

        if isinstance(loader, Response):
        if isinstance(loader, HttpResponseBase):
            return loader

        content, filename = loader
@@ -2367,13 +2423,19 @@ class ScanViewSet(BaseRLSViewSet):
                "ens",
                "*_ens_report.pdf",
            )
            loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
            loader = self._load_file(
                prefix,
                s3=True,
                bucket=bucket,
                list_objects=True,
                content_type="application/pdf",
            )
        else:
            base = os.path.dirname(scan.output_location)
            pattern = os.path.join(base, "ens", "*_ens_report.pdf")
            loader = self._load_file(pattern, s3=False)

        if isinstance(loader, Response):
        if isinstance(loader, HttpResponseBase):
            return loader

        content, filename = loader
@@ -2406,13 +2468,19 @@ class ScanViewSet(BaseRLSViewSet):
                "nis2",
                "*_nis2_report.pdf",
            )
            loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
            loader = self._load_file(
                prefix,
                s3=True,
                bucket=bucket,
                list_objects=True,
                content_type="application/pdf",
            )
        else:
            base = os.path.dirname(scan.output_location)
            pattern = os.path.join(base, "nis2", "*_nis2_report.pdf")
            loader = self._load_file(pattern, s3=False)

        if isinstance(loader, Response):
        if isinstance(loader, HttpResponseBase):
            return loader

        content, filename = loader
@@ -2445,13 +2513,19 @@ class ScanViewSet(BaseRLSViewSet):
                "csa",
                "*_csa_report.pdf",
            )
            loader = self._load_file(prefix, s3=True, bucket=bucket, list_objects=True)
            loader = self._load_file(
                prefix,
                s3=True,
                bucket=bucket,
                list_objects=True,
                content_type="application/pdf",
            )
        else:
            base = os.path.dirname(scan.output_location)
            pattern = os.path.join(base, "csa", "*_csa_report.pdf")
            loader = self._load_file(pattern, s3=False)

        if isinstance(loader, Response):
        if isinstance(loader, HttpResponseBase):
            return loader

        content, filename = loader

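Worth pausing on the redirect pattern above before the next file: on the S3 branch the API answers with a 302 whose Location is a presigned GetObject URL that expires in 300 seconds, so report bytes flow from S3 to the client without ever passing through a gunicorn worker. A minimal client-side sketch, assuming a hypothetical endpoint path and auth header (neither is taken from this diff):

    import requests

    API = "https://prowler-api.example.com/api/v1"  # hypothetical base URL
    HEADERS = {"Authorization": "Bearer <token>"}   # auth scheme is illustrative

    # Disable auto-follow to observe the 302 contract itself.
    resp = requests.get(f"{API}/scans/<scan-id>/report", headers=HEADERS, allow_redirects=False)
    assert resp.status_code == 302
    presigned_url = resp.headers["Location"]  # valid for roughly five minutes

    # The presigned URL is self-authorizing: no API credentials go to S3.
    download = requests.get(presigned_url)
    download.raise_for_status()
    with open("report.zip", "wb") as fh:
        fh.write(download.content)

A client that lets its HTTP library follow redirects automatically (the `requests` default) gets the same behavior in one call.
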
@@ -49,7 +49,7 @@ def start_aws_ingestion(
    }

    boto3_session = get_boto3_session(prowler_api_provider, prowler_sdk_provider)
    regions: list[str] = list(prowler_sdk_provider._enabled_regions)
    regions: list[str] = resolve_aws_regions(prowler_api_provider, prowler_sdk_provider)
    requested_syncs = list(cartography_aws.RESOURCE_FUNCTIONS.keys())

    sync_args = cartography_aws._build_aws_sync_kwargs(
@@ -226,6 +226,48 @@ def get_boto3_session(
    return boto3_session


def resolve_aws_regions(
    prowler_api_provider: ProwlerAPIProvider,
    prowler_sdk_provider: ProwlerSDKProvider,
) -> list[str]:
    """Resolve the regions to scan, falling back when `_enabled_regions` is `None`.

    The SDK silently sets `_enabled_regions` to `None` when `ec2:DescribeRegions`
    fails (missing IAM permission, transient error). Without a fallback the
    Cartography ingestion crashes with a non-actionable `TypeError`. Try the
    user's `audited_regions` next, then the partition's static region list.
    Excluded regions are honored on every branch.
    """
    if prowler_sdk_provider._enabled_regions is not None:
        regions = set(prowler_sdk_provider._enabled_regions)

    elif prowler_sdk_provider.identity.audited_regions:
        regions = set(prowler_sdk_provider.identity.audited_regions)

    else:
        partition = prowler_sdk_provider.identity.partition
        try:
            regions = prowler_sdk_provider.get_available_aws_service_regions(
                "ec2", partition
            )

        except KeyError:
            raise RuntimeError(
                f"No region data available for partition {partition!r}; "
                f"cannot determine regions to scan for "
                f"{prowler_api_provider.uid}"
            )

        logger.warning(
            f"Could not enumerate enabled regions for AWS account "
            f"{prowler_api_provider.uid}; falling back to all regions in "
            f"partition {partition!r}"
        )

    excluded = set(getattr(prowler_sdk_provider, "_excluded_regions", None) or ())
    return sorted(regions - excluded)
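
# Worked example (illustrative values, not repo fixtures): with
# `_enabled_regions = None`, `identity.audited_regions = ["eu-west-1", "us-east-1"]`
# and `_excluded_regions = {"us-east-1"}`, the audited-regions branch wins and
# the function returns ["eu-west-1"]. Only when `audited_regions` is also empty
# does it fall back to the partition's static region list via
# `get_available_aws_service_regions("ec2", partition)` and log the warning.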


def get_aioboto3_session(boto3_session: boto3.Session) -> aioboto3.Session:
    return aioboto3.Session(botocore_session=boto3_session._session)

@@ -18,28 +18,45 @@ logger = get_task_logger(__name__)

def cleanup_stale_attack_paths_scans() -> dict:
    """
    Find `EXECUTING` `AttackPathsScan` scans whose workers are dead or that have
    exceeded the stale threshold, and mark them as `FAILED`.
    Mark stale `AttackPathsScan` rows as `FAILED`.

    Two-pass detection:
    Covers two stuck-state scenarios:
    1. `EXECUTING` scans whose workers are dead, or that have exceeded the
       stale threshold while alive.
    2. `SCHEDULED` scans that never made it to a worker — parent scan
       crashed before dispatch, broker lost the message, etc. Detected by
       age plus the parent `Scan` no longer being in flight.
    """
    threshold = timedelta(minutes=ATTACK_PATHS_SCAN_STALE_THRESHOLD_MINUTES)
    now = datetime.now(tz=timezone.utc)
    cutoff = now - threshold

    cleaned_up: list[str] = []
    cleaned_up.extend(_cleanup_stale_executing_scans(cutoff))
    cleaned_up.extend(_cleanup_stale_scheduled_scans(cutoff))

    logger.info(
        f"Stale `AttackPathsScan` cleanup: {len(cleaned_up)} scan(s) cleaned up"
    )
    return {"cleaned_up_count": len(cleaned_up), "scan_ids": cleaned_up}


def _cleanup_stale_executing_scans(cutoff: datetime) -> list[str]:
    """
    Two-pass detection for `EXECUTING` scans:
    1. If `TaskResult.worker` exists, ping the worker.
       - Dead worker: cleanup immediately (any age).
       - Alive + past threshold: revoke the task, then cleanup.
       - Alive + within threshold: skip.
    2. If no worker field: fall back to time-based heuristic only.
    """
    threshold = timedelta(minutes=ATTACK_PATHS_SCAN_STALE_THRESHOLD_MINUTES)
    now = datetime.now(tz=timezone.utc)
    cutoff = now - threshold

    executing_scans = (
    executing_scans = list(
        AttackPathsScan.all_objects.using(MainRouter.admin_db)
        .filter(state=StateChoices.EXECUTING)
        .select_related("task__task_runner_task")
    )

    # Cache worker liveness so each worker is pinged at most once
    executing_scans = list(executing_scans)
    workers = {
        tr.worker
        for scan in executing_scans
@@ -48,7 +65,7 @@ def cleanup_stale_attack_paths_scans() -> dict:
    }
    worker_alive = {w: _is_worker_alive(w) for w in workers}

    cleaned_up = []
    cleaned_up: list[str] = []

    for scan in executing_scans:
        task_result = (
@@ -65,9 +82,7 @@ def cleanup_stale_attack_paths_scans() -> dict:

                # Alive but stale — revoke before cleanup
                _revoke_task(task_result)
                reason = (
                    "Scan exceeded stale threshold — " "cleaned up by periodic task"
                )
                reason = "Scan exceeded stale threshold — cleaned up by periodic task"
            else:
                reason = "Worker dead — cleaned up by periodic task"
        else:
@@ -82,10 +97,57 @@ def cleanup_stale_attack_paths_scans() -> dict:
        if _cleanup_scan(scan, task_result, reason):
            cleaned_up.append(str(scan.id))

    logger.info(
        f"Stale `AttackPathsScan` cleanup: {len(cleaned_up)} scan(s) cleaned up"
    return cleaned_up


def _cleanup_stale_scheduled_scans(cutoff: datetime) -> list[str]:
    """
    Cleanup `SCHEDULED` scans that never reached a worker.

    Detection:
    - `state == SCHEDULED`
    - `started_at < cutoff`
    - parent `Scan` is no longer in flight (terminal state or missing). This
      avoids cleaning up rows whose parent Prowler scan is legitimately still
      running.

    For each match: revoke the queued task (best-effort; harmless if already
    consumed), atomically flip to `FAILED`, and mark the `TaskResult`. The
    temp Neo4j database is never created while `SCHEDULED`, so no drop is
    needed.
    """
    scheduled_scans = list(
        AttackPathsScan.all_objects.using(MainRouter.admin_db)
        .filter(
            state=StateChoices.SCHEDULED,
            started_at__lt=cutoff,
        )
        .select_related("task__task_runner_task", "scan")
    )
    return {"cleaned_up_count": len(cleaned_up), "scan_ids": cleaned_up}

    cleaned_up: list[str] = []
    parent_terminal = (
        StateChoices.COMPLETED,
        StateChoices.FAILED,
        StateChoices.CANCELLED,
    )

    for scan in scheduled_scans:
        parent_scan = scan.scan
        if parent_scan is not None and parent_scan.state not in parent_terminal:
            continue

        task_result = (
            getattr(scan.task, "task_runner_task", None) if scan.task else None
        )
        if task_result:
            _revoke_task(task_result, terminate=False)

        reason = "Scan never started — cleaned up by periodic task"
        if _cleanup_scheduled_scan(scan, task_result, reason):
            cleaned_up.append(str(scan.id))

    return cleaned_up


def _is_worker_alive(worker: str) -> bool:
@@ -98,12 +160,17 @@ def _is_worker_alive(worker: str) -> bool:
    return True


def _revoke_task(task_result) -> None:
    """Send `SIGTERM` to a hung Celery task. Non-fatal on failure."""
def _revoke_task(task_result, terminate: bool = True) -> None:
    """Revoke a Celery task. Non-fatal on failure.

    `terminate=True` SIGTERMs the worker if the task is mid-execution; use
    for EXECUTING cleanup. `terminate=False` only marks the task id revoked
    across workers, so any worker pulling the queued message discards it;
    use for SCHEDULED cleanup where the task hasn't run yet.
    """
    try:
        current_app.control.revoke(
            task_result.task_id, terminate=True, signal="SIGTERM"
        )
        kwargs = {"terminate": True, "signal": "SIGTERM"} if terminate else {}
        current_app.control.revoke(task_result.task_id, **kwargs)
        logger.info(f"Revoked task {task_result.task_id}")
    except Exception:
        logger.exception(f"Failed to revoke task {task_result.task_id}")

@@ -125,28 +192,64 @@ def _cleanup_scan(scan, task_result, reason: str) -> bool:
    except Exception:
        logger.exception(f"Failed to drop temp database {tmp_db_name}")

    # 2. Lock row, verify still EXECUTING, mark FAILED — all atomic
    with rls_transaction(str(scan.tenant_id)):
        try:
            fresh_scan = AttackPathsScan.objects.select_for_update().get(id=scan.id)
        except AttackPathsScan.DoesNotExist:
            logger.warning(f"Scan {scan_id_str} no longer exists, skipping")
            return False
    fresh_scan = _finalize_failed_scan(scan, StateChoices.EXECUTING, reason)
    if fresh_scan is None:
        return False

        if fresh_scan.state != StateChoices.EXECUTING:
            logger.info(f"Scan {scan_id_str} is now {fresh_scan.state}, skipping")
            return False

        _mark_scan_finished(fresh_scan, StateChoices.FAILED, {"global_error": reason})

    # 3. Mark `TaskResult` as `FAILURE` (not RLS-protected, outside lock)
    # Mark `TaskResult` as `FAILURE` (not RLS-protected, outside lock)
    if task_result:
        task_result.status = states.FAILURE
        task_result.date_done = datetime.now(tz=timezone.utc)
        task_result.save(update_fields=["status", "date_done"])

    # 4. Recover graph_data_ready if provider data still exists
    recover_graph_data_ready(fresh_scan)

    logger.info(f"Cleaned up stale scan {scan_id_str}: {reason}")
    return True


def _cleanup_scheduled_scan(scan, task_result, reason: str) -> bool:
    """
    Clean up a `SCHEDULED` scan that never reached a worker.

    Skips the temp Neo4j drop — the database is only created once the worker
    enters `EXECUTING`, so dropping it here just produces noisy log output.

    Returns `True` if the scan was actually cleaned up, `False` if skipped.
    """
    scan_id_str = str(scan.id)

    fresh_scan = _finalize_failed_scan(scan, StateChoices.SCHEDULED, reason)
    if fresh_scan is None:
        return False

    if task_result:
        task_result.status = states.FAILURE
        task_result.date_done = datetime.now(tz=timezone.utc)
        task_result.save(update_fields=["status", "date_done"])

    logger.info(f"Cleaned up scheduled scan {scan_id_str}: {reason}")
    return True


def _finalize_failed_scan(scan, expected_state: str, reason: str):
    """
    Atomically lock the row, verify it's still in `expected_state`, and
    mark it `FAILED`. Returns the locked row on success, `None` if the
    row is gone or has already moved on.
    """
    scan_id_str = str(scan.id)
    with rls_transaction(str(scan.tenant_id)):
        try:
            fresh_scan = AttackPathsScan.objects.select_for_update().get(id=scan.id)
        except AttackPathsScan.DoesNotExist:
            logger.warning(f"Scan {scan_id_str} no longer exists, skipping")
            return None

        if fresh_scan.state != expected_state:
            logger.info(f"Scan {scan_id_str} is now {fresh_scan.state}, skipping")
            return None

        _mark_scan_finished(fresh_scan, StateChoices.FAILED, {"global_error": reason})

    return fresh_scan

@@ -67,25 +67,52 @@ def retrieve_attack_paths_scan(
    return None


def set_attack_paths_scan_task_id(
    tenant_id: str,
    scan_pk: str,
    task_id: str,
) -> None:
    """Persist the Celery `task_id` on the `AttackPathsScan` row.

    Called at dispatch time (when `apply_async` returns) so the row carries
    the task id even while still `SCHEDULED`. This lets the periodic
    cleanup revoke queued messages for scans that never reached a worker.
    """
    with rls_transaction(tenant_id):
        ProwlerAPIAttackPathsScan.objects.filter(id=scan_pk).update(task_id=task_id)
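
# Hedged dispatch-time sketch (kwargs are illustrative; the real call site
# lives in the scan-completion chain shown later in this diff):
#
#     result = perform_attack_paths_scan_task.apply_async(
#         kwargs={"tenant_id": tenant_id, "scan_id": scan_id}
#     )
#     set_attack_paths_scan_task_id(tenant_id, attack_paths_scan.id, result.id)
#
# Persisting `result.id` while the row is still SCHEDULED is what lets
# `_cleanup_stale_scheduled_scans` revoke messages that never reached a worker.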


def starting_attack_paths_scan(
    attack_paths_scan: ProwlerAPIAttackPathsScan,
    task_id: str,
    cartography_config: CartographyConfig,
) -> None:
    with rls_transaction(attack_paths_scan.tenant_id):
        attack_paths_scan.task_id = task_id
        attack_paths_scan.state = StateChoices.EXECUTING
        attack_paths_scan.started_at = datetime.now(tz=timezone.utc)
        attack_paths_scan.update_tag = cartography_config.update_tag
) -> bool:
    """Flip the row from `SCHEDULED` to `EXECUTING` atomically.

        attack_paths_scan.save(
            update_fields=[
                "task_id",
                "state",
                "started_at",
                "update_tag",
            ]
        )
    Returns `False` if the row is gone or has already moved past
    `SCHEDULED` (e.g., periodic cleanup raced ahead and marked it
    `FAILED` while the worker message was still in flight).
    """
    with rls_transaction(attack_paths_scan.tenant_id):
        try:
            locked = ProwlerAPIAttackPathsScan.objects.select_for_update().get(
                id=attack_paths_scan.id
            )
        except ProwlerAPIAttackPathsScan.DoesNotExist:
            return False

        if locked.state != StateChoices.SCHEDULED:
            return False

        locked.state = StateChoices.EXECUTING
        locked.started_at = datetime.now(tz=timezone.utc)
        locked.update_tag = cartography_config.update_tag
        locked.save(update_fields=["state", "started_at", "update_tag"])

        # Keep the in-memory object the caller is holding in sync.
        attack_paths_scan.state = locked.state
        attack_paths_scan.started_at = locked.started_at
        attack_paths_scan.update_tag = locked.update_tag
    return True
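
# Net effect of the compare-and-set above (hedged summary): if the periodic
# cleanup flipped the row to FAILED while the Celery message was still queued,
# the worker's call returns False and it exits without touching state:
#
#     if not db_utils.starting_attack_paths_scan(scan, cartography_config):
#         return {}  # cleanup raced ahead; nothing left to do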


def _mark_scan_finished(

@@ -97,6 +97,19 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
    )
    attack_paths_scan = db_utils.retrieve_attack_paths_scan(tenant_id, scan_id)

    # Idempotency guard: cleanup may have flipped this row to a terminal state
    # while the message was still in flight. Bail out before touching state.
    if attack_paths_scan and attack_paths_scan.state in (
        StateChoices.FAILED,
        StateChoices.COMPLETED,
        StateChoices.CANCELLED,
    ):
        logger.warning(
            f"Attack Paths scan {attack_paths_scan.id} already in terminal "
            f"state {attack_paths_scan.state}; skipping execution"
        )
        return {}

    # Checks before starting the scan
    if not cartography_ingestion_function:
        ingestion_exceptions = {
@@ -114,12 +127,17 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:

    else:
        if not attack_paths_scan:
            # Safety net for in-flight messages or direct task invocations; dispatcher normally pre-creates the row.
            logger.warning(
f"No Attack Paths Scan found for scan {scan_id} and tenant {tenant_id}, let's create it then"
|
||||
            )
            attack_paths_scan = db_utils.create_attack_paths_scan(
                tenant_id, scan_id, prowler_api_provider.id
            )
        if attack_paths_scan and task_id:
            db_utils.set_attack_paths_scan_task_id(
                tenant_id, attack_paths_scan.id, task_id
            )

        tmp_database_name = graph_database.get_database_name(
            attack_paths_scan.id, temporary=True
@@ -141,9 +159,13 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
        )

        # Starting the Attack Paths scan
        db_utils.starting_attack_paths_scan(
            attack_paths_scan, task_id, tenant_cartography_config
        )
        if not db_utils.starting_attack_paths_scan(
            attack_paths_scan, tenant_cartography_config
        ):
            logger.warning(
                f"Attack Paths scan {attack_paths_scan.id} no longer in SCHEDULED state; cleanup likely raced ahead"
            )
            return {}

        scan_t0 = time.perf_counter()
        logger.info(

@@ -47,6 +47,9 @@ from prowler.lib.outputs.compliance.csa.csa_oraclecloud import OracleCloudCSA
from prowler.lib.outputs.compliance.ens.ens_aws import AWSENS
from prowler.lib.outputs.compliance.ens.ens_azure import AzureENS
from prowler.lib.outputs.compliance.ens.ens_gcp import GCPENS
from prowler.lib.outputs.compliance.asd_essential_eight.asd_essential_eight_aws import (
    ASDEssentialEightAWS,
)
from prowler.lib.outputs.compliance.iso27001.iso27001_aws import AWSISO27001
from prowler.lib.outputs.compliance.iso27001.iso27001_azure import AzureISO27001
from prowler.lib.outputs.compliance.iso27001.iso27001_gcp import GCPISO27001
@@ -100,6 +103,7 @@ COMPLIANCE_CLASS_MAP = {
        (lambda name: name.startswith("ccc_"), CCC_AWS),
        (lambda name: name.startswith("c5_"), AWSC5),
        (lambda name: name.startswith("csa_"), AWSCSA),
        (lambda name: name == "asd_essential_eight_aws", ASDEssentialEightAWS),
    ],
    "azure": [
        (lambda name: name.startswith("cis_"), AzureCIS),

@@ -10,16 +10,29 @@ from typing import Any

import sentry_sdk
from celery.utils.log import get_task_logger
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE
from config.env import env
from config.settings.celery import CELERY_DEADLOCK_ATTEMPTS
from django.db import IntegrityError, OperationalError
from django.db.models import Case, Count, IntegerField, Max, Min, Prefetch, Q, Sum, When
from django.db.models import (
    Case,
    Count,
    Exists,
    IntegerField,
    Max,
    Min,
    OuterRef,
    Prefetch,
    Q,
    Sum,
    When,
)
from django.utils import timezone as django_timezone
from tasks.jobs.queries import (
    COMPLIANCE_UPSERT_PROVIDER_SCORE_SQL,
    COMPLIANCE_UPSERT_TENANT_SUMMARY_SQL,
)
from tasks.utils import CustomEncoder
from tasks.utils import CustomEncoder, batched

from api.compliance import PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE
from api.constants import SEVERITY_ORDER
@@ -189,8 +202,9 @@ def _get_attack_surface_mapping_from_provider(provider_type: str) -> dict:
            "iam_inline_policy_allows_privilege_escalation",
        },
        "ec2-imdsv1": {
            "ec2_instance_imdsv2_enabled"
        },  # AWS only - IMDSv1 enabled findings
            "ec2_instance_imdsv2_enabled",
            "ec2_instance_account_imdsv2_enabled",
        },  # AWS only - instance-level IMDSv1 exposure and account IMDS defaults
    }
    for category_name, check_ids in attack_surface_check_mappings.items():
        if check_ids is None:
@@ -2069,3 +2083,169 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
        "created": created_count,
        "updated": updated_count,
    }


def reset_ephemeral_resource_findings_count(tenant_id: str, scan_id: str) -> dict:
    """Zero failed_findings_count for resources missing from a completed full-scope scan.

    Resources that exist in the database for the scan's provider but were not
    touched by this scan are treated as ephemeral. We keep their historical
    findings, but reset the denormalized counter that drives the Resources page
    sort so they stop ranking at the top.

    Skipped (no-op) when:
    - The scan is not in COMPLETED state.
    - The scan ran with any scoping filter in scanner_args (partial scope).

    Query design (must scale to 500k+ resources per provider):
        Phase 1 — collect ephemeral IDs with one anti-join read.
            Outer filter ``(tenant_id, provider_id, failed_findings_count > 0)``
            uses ``resources_tenant_provider_idx``. The correlated
            ``NOT EXISTS`` subquery hits the implicit unique index
            ``(tenant_id, scan_id, resource_id)`` on ``ResourceScanSummary``.
            ``NOT EXISTS`` (vs ``NOT IN``) is null-safe and lets the planner
            choose between hash anti-join and indexed nested-loop anti-join.
            ``.iterator(chunk_size=...)`` skips the queryset cache so memory
            stays bounded while streaming UUIDs.
        Phase 2 — UPDATE in fixed-size batches.
            One large UPDATE would hold row-exclusive locks for seconds and
            create a WAL spike. Batched UPDATEs by ``id__in`` (~1k rows each)
            hit the primary key, keep each lock window ~50ms, bound WAL chunks,
            and let other writers proceed between batches.
            ``failed_findings_count__gt=0`` in the UPDATE is idempotent under
            concurrent scans and skips no-op rewrites.
    Reads use the primary DB, not the replica: ``ResourceScanSummary`` rows
    were written by the same scan task that triggered this one, so replica
    lag could falsely classify scanned resources as ephemeral.

    Scope detection (``Scan.is_full_scope()``) derives the set of scoping
    scanner_args from ``prowler.lib.scan.scan.Scan.__init__`` via
    introspection, so the API can never drift from the SDK's filter
    contract. Imported scans are also rejected by trigger — they may only
    cover a partial slice of resources.
    """
    with rls_transaction(tenant_id):
        scan = Scan.objects.filter(tenant_id=tenant_id, id=scan_id).first()

    if scan is None:
        logger.warning(f"Scan {scan_id} not found")
        return {"status": "skipped", "reason": "scan not found"}

    if scan.state != StateChoices.COMPLETED:
        logger.info(f"Scan {scan_id} not completed; skipping ephemeral reset")
        return {"status": "skipped", "reason": "scan not completed"}

    if not scan.is_full_scope():
        logger.info(
            f"Scan {scan_id} ran with scoping filters; skipping ephemeral reset"
        )
        return {"status": "skipped", "reason": "partial scan scope"}

    # Race protection: if a newer completed full-scope scan exists for this
    # provider, our ResourceScanSummary set is stale relative to the resources'
    # current failed_findings_count values (which the newer scan already
    # refreshed). Wiping based on the older scan would zero counts the newer
    # scan just set. Skip and let the newer scan's reset task do the work; if
    # this task was delayed in the queue, that's the correct outcome.
    # `completed_at__isnull=False` is required: Postgres orders NULL first in
    # DESC, so a sibling COMPLETED scan with a missing completed_at would sort
    # as "newest" and incorrectly cause us to skip.
    with rls_transaction(tenant_id):
        latest_full_scope_scan_id = (
            Scan.objects.filter(
                tenant_id=tenant_id,
                provider_id=scan.provider_id,
                state=StateChoices.COMPLETED,
                completed_at__isnull=False,
            )
            .order_by("-completed_at", "-inserted_at")
            .values_list("id", flat=True)
            .first()
        )
    if latest_full_scope_scan_id != scan.id:
        logger.info(
            f"Scan {scan_id} is not the latest completed scan for provider "
            f"{scan.provider_id}; skipping ephemeral reset"
        )
        return {"status": "skipped", "reason": "newer scan exists"}

    # Defensive gate: ResourceScanSummary rows are written by perform_prowler_scan
    # via best-effort bulk_create. If those writes failed silently (or the scan
    # genuinely produced resources but no summaries were persisted), the
    # ~Exists(in_scan) anti-join below would classify EVERY resource for this
    # provider as ephemeral and zero their counts. Bail loudly instead.
    with rls_transaction(tenant_id):
        summaries_present = ResourceScanSummary.objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        ).exists()
    if scan.unique_resource_count > 0 and not summaries_present:
        logger.error(
            f"Scan {scan_id} reports {scan.unique_resource_count} unique "
            f"resources but no ResourceScanSummary rows are persisted; "
            f"skipping ephemeral reset to avoid wiping valid counts"
        )
        return {"status": "skipped", "reason": "summaries missing"}

    # Stays on the primary DB intentionally. ResourceScanSummary rows are
    # written by perform_prowler_scan in the same chain that triggered this
    # task, so replica lag could return an empty/partial summary set; a stale
    # read here would classify every Resource as ephemeral and wipe valid
    # failed_findings_count values on the primary. Same rationale as
    # update_provider_compliance_scores below in this module.
    # Materializing the ID list (rather than streaming the iterator into
    # batched UPDATEs) is intentional: it lets the UPDATEs run in their own
    # short rls_transactions instead of one long transaction holding row locks
    # on every batch. At 500k UUIDs the peak memory is ~40 MB — acceptable for
    # a Celery worker — and is the better trade-off versus a multi-second
    # write-lock window blocking concurrent scans.
    with rls_transaction(tenant_id):
        in_scan = ResourceScanSummary.objects.filter(
            tenant_id=tenant_id,
            scan_id=scan_id,
            resource_id=OuterRef("pk"),
        )
        ephemeral_ids = list(
            Resource.objects.filter(
                tenant_id=tenant_id,
                provider_id=scan.provider_id,
                failed_findings_count__gt=0,
            )
            .filter(~Exists(in_scan))
            .values_list("id", flat=True)
            .iterator(chunk_size=DJANGO_FINDINGS_BATCH_SIZE)
        )

    if not ephemeral_ids:
        logger.info(f"No ephemeral resources for scan {scan_id}")
        return {
            "status": "completed",
            "scan_id": str(scan_id),
            "provider_id": str(scan.provider_id),
            "reset": 0,
        }

    total_updated = 0
    for batch, _ in batched(ephemeral_ids, DJANGO_FINDINGS_BATCH_SIZE):
        # batched() always yields a final tuple, which is empty when the input
        # length is an exact multiple of the batch size. Skip it so we don't
        # issue a no-op UPDATE ... WHERE id IN ().
        if not batch:
            continue
        with rls_transaction(tenant_id):
            total_updated += Resource.objects.filter(
                tenant_id=tenant_id,
                id__in=batch,
                failed_findings_count__gt=0,
            ).update(failed_findings_count=0)

    logger.info(
        f"Ephemeral resource reset for scan {scan_id}: "
        f"{total_updated} resources zeroed for provider {scan.provider_id}"
    )

    return {
        "status": "completed",
        "scan_id": str(scan_id),
        "provider_id": str(scan.provider_id),
        "reset": total_updated,
    }
|
||||
|
||||
@@ -58,6 +58,7 @@ from tasks.jobs.scan import (
    aggregate_findings,
    create_compliance_requirements,
    perform_prowler_scan,
+   reset_ephemeral_resource_findings_count,
    update_provider_compliance_scores,
)
from tasks.utils import (
@@ -77,6 +78,7 @@ from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.generic.generic import GenericCompliance
from prowler.lib.outputs.finding import Finding as FindingOutput


logger = get_task_logger(__name__)


@@ -158,6 +160,13 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
            generate_outputs_task.si(
                scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
            ),
+           # post-scan task — runs in the parallel group so a
+           # failure cannot cascade into reports or integrations. Its only
+           # prerequisite is that perform_prowler_scan has committed
+           # ResourceScanSummary, which is true by the time this chain fires.
+           reset_ephemeral_resource_findings_count_task.si(
+               tenant_id=tenant_id, scan_id=scan_id
+           ),
        ),
        group(
            # Use optimized task that generates both reports with shared queries
@@ -173,10 +182,25 @@ def _perform_scan_complete_tasks(tenant_id: str, scan_id: str, provider_id: str)
    ).apply_async()

    if can_provider_run_attack_paths_scan(tenant_id, provider_id):
-       perform_attack_paths_scan_task.apply_async(
+       # Row is normally created upstream, so this is a safeguard so we can attach the task id below
+       attack_paths_scan = attack_paths_db_utils.retrieve_attack_paths_scan(
+           tenant_id, scan_id
+       )
+       if attack_paths_scan is None:
+           attack_paths_scan = attack_paths_db_utils.create_attack_paths_scan(
+               tenant_id, scan_id, provider_id
+           )
+
+       # Persist the Celery task id so the periodic cleanup can revoke scans stuck in SCHEDULED
+       result = perform_attack_paths_scan_task.apply_async(
            kwargs={"tenant_id": tenant_id, "scan_id": scan_id}
        )
+
+       if attack_paths_scan and result:
+           attack_paths_db_utils.set_attack_paths_scan_task_id(
+               tenant_id, attack_paths_scan.id, result.task_id
+           )


@shared_task(base=RLSTask, name="provider-connection-check")
@set_tenant
@@ -378,7 +402,8 @@ class AttackPathsScanRLSTask(RLSTask):
    SDK initialization, or Neo4j configuration errors during setup).
    """

-   def on_failure(self, exc, task_id, args, kwargs, _einfo):
+   def on_failure(self, exc, task_id, args, kwargs, _einfo):  # noqa: ARG002
+       del args  # Required by Celery's Task.on_failure signature; not used.
        tenant_id = kwargs.get("tenant_id")
        scan_id = kwargs.get("scan_id")

@@ -775,6 +800,32 @@ def aggregate_daily_severity_task(tenant_id: str, scan_id: str):
    return aggregate_daily_severity(tenant_id=tenant_id, scan_id=scan_id)


+@shared_task(name="scan-reset-ephemeral-resources", queue="overview")
+@handle_provider_deletion
+def reset_ephemeral_resource_findings_count_task(tenant_id: str, scan_id: str):
+   """Reset failed_findings_count for resources missing from a completed full-scope scan.
+
+   Failures are swallowed and returned as a status: this task lives inside the
+   post-scan group, and Celery propagates group-member exceptions into the next
+   chain step — meaning a crash here would block compliance reports and
+   integrations. The reset is purely cosmetic (UI sort optimization), so a
+   bad run is logged and absorbed rather than allowed to cascade.
+   """
+   try:
+       return reset_ephemeral_resource_findings_count(
+           tenant_id=tenant_id, scan_id=scan_id
+       )
+   except Exception as exc:  # noqa: BLE001 — intentionally broad
+       logger.exception(
+           f"reset_ephemeral_resource_findings_count failed for scan {scan_id}: {exc}"
+       )
+       return {
+           "status": "failed",
+           "scan_id": str(scan_id),
+           "reason": str(exc),
+       }


@shared_task(base=RLSTask, name="scan-finding-group-summaries", queue="overview")
@set_tenant(keep_tenant=True)
@handle_provider_deletion

@@ -135,7 +135,7 @@ class TestAttackPathsRun:
        assert result == ingestion_result
        mock_retrieve_scan.assert_called_once_with(str(tenant.id), str(scan.id))
        mock_starting.assert_called_once()
-       config = mock_starting.call_args[0][2]
+       config = mock_starting.call_args[0][1]
        assert config.neo4j_database == "tenant-db"
        mock_get_db_name.assert_has_calls(
            [call(attack_paths_scan.id, temporary=True), call(provider.tenant_id)]
@@ -2732,3 +2732,143 @@ class TestCleanupStaleAttackPathsScans:
        assert result["cleaned_up_count"] == 2
        # Worker should be pinged exactly once — cache prevents second ping
        mock_alive.assert_called_once_with("shared-worker@host")

    # `SCHEDULED` state cleanup
    def _create_scheduled_scan(
        self,
        tenant,
        provider,
        *,
        age_minutes,
        parent_state,
        with_task=True,
    ):
        """Create a SCHEDULED AttackPathsScan with a parent Scan in `parent_state`.

        `age_minutes` controls how far in the past `started_at` is set, so
        callers can place rows safely past the cleanup cutoff.
        """
        parent_scan = Scan.objects.create(
            name="Parent Prowler scan",
            provider=provider,
            trigger=Scan.TriggerChoices.MANUAL,
            state=parent_state,
            tenant_id=tenant.id,
        )

        ap_scan = AttackPathsScan.objects.create(
            tenant_id=tenant.id,
            provider=provider,
            scan=parent_scan,
            state=StateChoices.SCHEDULED,
            started_at=datetime.now(tz=timezone.utc) - timedelta(minutes=age_minutes),
        )

        task_result = None
        if with_task:
            task_result = TaskResult.objects.create(
                task_id=str(ap_scan.id),
                task_name="attack-paths-scan-perform",
                status="PENDING",
            )
            task = Task.objects.create(
                id=task_result.task_id,
                task_runner_task=task_result,
                tenant_id=tenant.id,
            )
            ap_scan.task = task
            ap_scan.save(update_fields=["task_id"])

        return ap_scan, task_result

    @patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
    @patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
    @patch(
        "tasks.jobs.attack_paths.cleanup.rls_transaction",
        new=lambda *args, **kwargs: nullcontext(),
    )
    @patch("tasks.jobs.attack_paths.cleanup._revoke_task")
    def test_cleans_up_scheduled_scan_when_parent_is_terminal(
        self,
        mock_revoke,
        mock_drop_db,
        mock_recover,
        tenants_fixture,
        providers_fixture,
    ):
        from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans

        tenant = tenants_fixture[0]
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
        provider.save()

        ap_scan, task_result = self._create_scheduled_scan(
            tenant,
            provider,
            age_minutes=24 * 60 * 3,  # 3 days, safely past any threshold
            parent_state=StateChoices.FAILED,
        )

        result = cleanup_stale_attack_paths_scans()

        assert result["cleaned_up_count"] == 1
        assert str(ap_scan.id) in result["scan_ids"]

        ap_scan.refresh_from_db()
        assert ap_scan.state == StateChoices.FAILED
        assert ap_scan.progress == 100
        assert ap_scan.completed_at is not None
        assert ap_scan.ingestion_exceptions == {
            "global_error": "Scan never started — cleaned up by periodic task"
        }

        # SCHEDULED revoke must NOT terminate a running worker
        mock_revoke.assert_called_once()
        assert mock_revoke.call_args.kwargs == {"terminate": False}

        # Temp DB never created for SCHEDULED, so no drop attempted
        mock_drop_db.assert_not_called()
        # Tenant Neo4j data is untouched in this path
        mock_recover.assert_not_called()

        task_result.refresh_from_db()
        assert task_result.status == "FAILURE"
        assert task_result.date_done is not None

    @patch("tasks.jobs.attack_paths.cleanup.recover_graph_data_ready")
    @patch("tasks.jobs.attack_paths.cleanup.graph_database.drop_database")
    @patch(
        "tasks.jobs.attack_paths.cleanup.rls_transaction",
        new=lambda *args, **kwargs: nullcontext(),
    )
    @patch("tasks.jobs.attack_paths.cleanup._revoke_task")
    def test_skips_scheduled_scan_when_parent_still_in_flight(
        self,
        mock_revoke,
        mock_drop_db,
        mock_recover,
        tenants_fixture,
        providers_fixture,
    ):
        from tasks.jobs.attack_paths.cleanup import cleanup_stale_attack_paths_scans

        tenant = tenants_fixture[0]
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
        provider.save()

        ap_scan, _ = self._create_scheduled_scan(
            tenant,
            provider,
            age_minutes=24 * 60 * 3,
            parent_state=StateChoices.EXECUTING,
        )

        result = cleanup_stale_attack_paths_scans()

        assert result["cleaned_up_count"] == 0

        ap_scan.refresh_from_db()
        assert ap_scan.state == StateChoices.SCHEDULED
        mock_revoke.assert_not_called()

@@ -24,6 +24,7 @@ from tasks.jobs.scan import (
    aggregate_findings,
    create_compliance_requirements,
    perform_prowler_scan,
+   reset_ephemeral_resource_findings_count,
    update_provider_compliance_scores,
)
from tasks.utils import CustomEncoder
@@ -35,6 +36,7 @@ from api.models import (
    MuteRule,
    Provider,
    Resource,
+   ResourceScanSummary,
    Scan,
    ScanSummary,
    StateChoices,
@@ -3851,6 +3853,7 @@ class TestAggregateAttackSurface:
            in result["privilege-escalation"]
        )
        assert "ec2_instance_imdsv2_enabled" in result["ec2-imdsv1"]
+       assert "ec2_instance_account_imdsv2_enabled" in result["ec2-imdsv1"]

    @patch("tasks.jobs.scan.AttackSurfaceOverview.objects.bulk_create")
    @patch("tasks.jobs.scan.Finding.all_objects.filter")
@@ -4335,3 +4338,315 @@ class TestUpdateProviderComplianceScores:
        assert any("provider_compliance_scores" in c for c in calls)
        assert any("tenant_compliance_summaries" in c for c in calls)
        assert any("pg_advisory_xact_lock" in c for c in calls)


class TestScanIsFullScope:
    def _live_trigger(self):
        return Scan.TriggerChoices.MANUAL

    @pytest.mark.parametrize(
        "scanner_args",
        [
            {},
            {"unrelated": "value"},
            {"checks": None},
            {"services": []},
            {"severities": ""},
        ],
    )
    def test_full_scope_when_no_filters_present(self, scanner_args):
        scan = Scan(scanner_args=scanner_args, trigger=self._live_trigger())
        assert scan.is_full_scope() is True

    def test_full_scope_covers_every_sdk_kwarg(self):
        # Lock the predicate to whatever ProwlerScan's __init__ exposes today.
        # If the SDK adds a new filter, this test still passes via the
        # introspection-driven derivation; if it adds a non-filter kwarg
        # (e.g. provider-like), keep the exclusion list in sync in models.py.
        from prowler.lib.scan.scan import Scan as ProwlerScan
        import inspect

        expected = tuple(
            name
            for name in inspect.signature(ProwlerScan.__init__).parameters
            if name not in ("self", "provider")
        )
        assert Scan.get_scoping_scanner_arg_keys() == expected
        # Spot-check a few well-known filters survive the introspection.
        assert "checks" in expected
        assert "services" in expected
        assert "severities" in expected

    def test_partial_scope_for_each_sdk_filter(self):
        for key in Scan.get_scoping_scanner_arg_keys():
            scan = Scan(scanner_args={key: ["x"]}, trigger=self._live_trigger())
            assert scan.is_full_scope() is False, f"{key} should mark scan as partial"

    def test_imported_scan_is_never_full_scope(self):
        # Forward-defensive: any trigger outside LIVE_SCAN_TRIGGERS (e.g. a
        # future "imported" trigger) must never qualify, even with empty args.
        scan = Scan(scanner_args={}, trigger="imported")
        assert scan.is_full_scope() is False

    def test_handles_none_scanner_args(self):
        scan = Scan(scanner_args=None, trigger=self._live_trigger())
        assert scan.is_full_scope() is True


@pytest.mark.django_db
class TestResetEphemeralResourceFindingsCount:
    def _make_scan_summary(self, tenant_id, scan_id, resource):
        return ResourceScanSummary.objects.create(
            tenant_id=tenant_id,
            scan_id=scan_id,
            resource_id=resource.id,
            service=resource.service,
            region=resource.region,
            resource_type=resource.type,
        )

    def test_resets_only_resources_missing_from_full_scope_scan(
        self, tenants_fixture, scans_fixture, providers_fixture, resources_fixture
    ):
        tenant, *_ = tenants_fixture
        scan1, scan2, *_ = scans_fixture
        resource1, resource2, resource3 = resources_fixture

        Resource.objects.filter(id=resource1.id).update(failed_findings_count=3)
        Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)
        Resource.objects.filter(id=resource3.id).update(failed_findings_count=7)

        # Only resource1 was scanned in scan1; resource2 is ephemeral.
        self._make_scan_summary(tenant.id, scan1.id, resource1)

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(scan1.id)
        )

        assert result["status"] == "completed"
        assert result["reset"] == 1

        resource1.refresh_from_db()
        resource2.refresh_from_db()
        resource3.refresh_from_db()

        assert resource1.failed_findings_count == 3
        assert resource2.failed_findings_count == 0
        # Other provider's resource is never touched.
        assert resource3.failed_findings_count == 7

    def test_skips_when_scan_not_completed(
        self, tenants_fixture, scans_fixture, resources_fixture
    ):
        tenant, *_ = tenants_fixture
        scan1, *_ = scans_fixture
        resource1, resource2, _ = resources_fixture

        Scan.objects.filter(id=scan1.id).update(state=StateChoices.EXECUTING)
        Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(scan1.id)
        )

        assert result["status"] == "skipped"
        assert result["reason"] == "scan not completed"

        resource2.refresh_from_db()
        assert resource2.failed_findings_count == 5

    def test_skips_when_scan_has_scoping_filters(
        self, tenants_fixture, scans_fixture, resources_fixture
    ):
        tenant, *_ = tenants_fixture
        scan1, *_ = scans_fixture
        _, resource2, _ = resources_fixture

        Scan.objects.filter(id=scan1.id).update(scanner_args={"checks": ["check1"]})
        Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(scan1.id)
        )

        assert result["status"] == "skipped"
        assert result["reason"] == "partial scan scope"

        resource2.refresh_from_db()
        assert resource2.failed_findings_count == 5

    def test_skips_when_scan_not_found(self, tenants_fixture):
        tenant, *_ = tenants_fixture

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(uuid.uuid4())
        )

        assert result["status"] == "skipped"
        assert result["reason"] == "scan not found"

    def test_skips_when_newer_scan_completed_for_same_provider(
        self, tenants_fixture, scans_fixture, providers_fixture, resources_fixture
    ):
        # If a newer completed scan exists for the same provider, our
        # ResourceScanSummary set is stale relative to the resources' current
        # counts, and applying the diff would corrupt them.
        from datetime import timedelta

        tenant, *_ = tenants_fixture
        scan1, *_ = scans_fixture
        provider, *_ = providers_fixture
        _, resource2, _ = resources_fixture

        Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)

        # Create a newer COMPLETED scan for the same provider, with an
        # explicit completed_at strictly after scan1's so ordering is
        # deterministic regardless of clock resolution.
        newer_completed_at = scan1.completed_at + timedelta(minutes=5)
        Scan.objects.create(
            name="Newer Scan",
            provider=provider,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.COMPLETED,
            tenant_id=tenant.id,
            started_at=newer_completed_at,
            completed_at=newer_completed_at,
        )

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(scan1.id)
        )

        assert result["status"] == "skipped"
        assert result["reason"] == "newer scan exists"

        resource2.refresh_from_db()
        assert resource2.failed_findings_count == 5

    def test_does_not_touch_other_providers_resources(
        self, tenants_fixture, scans_fixture, providers_fixture, resources_fixture
    ):
        tenant, *_ = tenants_fixture
        scan1, *_ = scans_fixture
        _, _, resource3 = resources_fixture

        # resource3 belongs to provider2 with failed_findings_count > 0 and is
        # not in scan1's summary. It MUST NOT be reset.
        Resource.objects.filter(id=resource3.id).update(failed_findings_count=9)

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(scan1.id)
        )

        assert result["status"] == "completed"
        assert result["reset"] == 0

        resource3.refresh_from_db()
        assert resource3.failed_findings_count == 9

    def test_resources_already_zero_are_not_rewritten(
        self, tenants_fixture, scans_fixture, resources_fixture
    ):
        tenant, *_ = tenants_fixture
        scan1, *_ = scans_fixture
        resource1, resource2, _ = resources_fixture

        # Both resources already at 0, neither in summary -> nothing to update.
        Resource.objects.filter(id=resource1.id).update(failed_findings_count=0)
        Resource.objects.filter(id=resource2.id).update(failed_findings_count=0)

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(scan1.id)
        )

        assert result["status"] == "completed"
        assert result["reset"] == 0

    def test_skips_when_summaries_missing_for_scan_with_resources(
        self, tenants_fixture, scans_fixture, resources_fixture
    ):
        # Catastrophic guard: if a scan reports unique_resource_count > 0 but
        # no ResourceScanSummary rows are persisted (e.g. bulk_create silently
        # failed), the anti-join would classify EVERY resource as ephemeral
        # and zero their counts. The gate must skip and preserve the data.
        tenant, *_ = tenants_fixture
        scan1, *_ = scans_fixture
        resource1, resource2, _ = resources_fixture

        Scan.objects.filter(id=scan1.id).update(unique_resource_count=10)
        Resource.objects.filter(id=resource1.id).update(failed_findings_count=3)
        Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(scan1.id)
        )

        assert result["status"] == "skipped"
        assert result["reason"] == "summaries missing"

        resource1.refresh_from_db()
        resource2.refresh_from_db()
        assert resource1.failed_findings_count == 3
        assert resource2.failed_findings_count == 5

    def test_ignores_sibling_scan_with_null_completed_at(
        self, tenants_fixture, scans_fixture, providers_fixture, resources_fixture
    ):
        # Postgres orders NULL first in DESC; a sibling COMPLETED scan with a
        # missing completed_at must not be treated as the latest scan and
        # cause us to incorrectly skip the reset.
        tenant, *_ = tenants_fixture
        scan1, *_ = scans_fixture
        provider, *_ = providers_fixture
        resource1, resource2, _ = resources_fixture

        Resource.objects.filter(id=resource2.id).update(failed_findings_count=5)
        self._make_scan_summary(tenant.id, scan1.id, resource1)

        Scan.objects.create(
            name="Ghost Scan",
            provider=provider,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.COMPLETED,
            tenant_id=tenant.id,
            started_at=scan1.completed_at,
            completed_at=None,
        )

        result = reset_ephemeral_resource_findings_count(
            tenant_id=str(tenant.id), scan_id=str(scan1.id)
        )

        assert result["status"] == "completed"
        assert result["reset"] == 1

        resource2.refresh_from_db()
        assert resource2.failed_findings_count == 0

    def test_batches_updates_when_many_ephemeral_resources(
        self, tenants_fixture, scans_fixture, resources_fixture
    ):
        # Forces multiple batches to confirm the chunked UPDATE path executes
        # cleanly and the count is the sum across batches.
        tenant, *_ = tenants_fixture
        scan1, *_ = scans_fixture
        resource1, resource2, _ = resources_fixture

        Resource.objects.filter(id=resource1.id).update(failed_findings_count=2)
        Resource.objects.filter(id=resource2.id).update(failed_findings_count=4)

        # No ResourceScanSummary -> both resource1 and resource2 are ephemeral.
        # Force a 1-row batch via the shared findings batch size knob.
        with patch("tasks.jobs.scan.DJANGO_FINDINGS_BATCH_SIZE", 1):
            result = reset_ephemeral_resource_findings_count(
                tenant_id=str(tenant.id), scan_id=str(scan1.id)
            )

        assert result["status"] == "completed"
        assert result["reset"] == 2

        resource1.refresh_from_db()
        resource2.refresh_from_db()
        assert resource1.failed_findings_count == 0
        assert resource2.failed_findings_count == 0

@@ -842,6 +842,72 @@ class TestScanCompleteTasks:
        # Attack Paths task should be skipped when provider cannot run it
        mock_attack_paths_task.assert_not_called()

    @pytest.mark.parametrize(
        "row_pre_existing",
        [True, False],
        ids=["row-pre-existing", "row-missing-fallback"],
    )
    @patch("tasks.tasks.aggregate_attack_surface_task.apply_async")
    @patch("tasks.tasks.chain")
    @patch("tasks.tasks.create_compliance_requirements_task.si")
    @patch("tasks.tasks.update_provider_compliance_scores_task.si")
    @patch("tasks.tasks.perform_scan_summary_task.si")
    @patch("tasks.tasks.generate_outputs_task.si")
    @patch("tasks.tasks.generate_compliance_reports_task.si")
    @patch("tasks.tasks.check_integrations_task.si")
    @patch("tasks.tasks.attack_paths_db_utils.set_attack_paths_scan_task_id")
    @patch("tasks.tasks.attack_paths_db_utils.create_attack_paths_scan")
    @patch("tasks.tasks.attack_paths_db_utils.retrieve_attack_paths_scan")
    @patch("tasks.tasks.perform_attack_paths_scan_task.apply_async")
    @patch("tasks.tasks.can_provider_run_attack_paths_scan", return_value=True)
    def test_scan_complete_dispatches_attack_paths_scan(
        self,
        _mock_can_run_attack_paths,
        mock_attack_paths_task,
        mock_retrieve,
        mock_create,
        mock_set_task_id,
        mock_check_integrations_task,
        mock_compliance_reports_task,
        mock_outputs_task,
        mock_scan_summary_task,
        mock_update_compliance_scores_task,
        mock_compliance_requirements_task,
        mock_chain,
        mock_attack_surface_task,
        row_pre_existing,
    ):
        """When a provider can run Attack Paths, dispatch must:
        1. Reuse the existing row or create one if missing.
        2. Call apply_async on the Attack Paths task.
        3. Persist the returned Celery task id on the row.
        """
        existing_row = MagicMock(id="ap-scan-id")
        if row_pre_existing:
            mock_retrieve.return_value = existing_row
        else:
            mock_retrieve.return_value = None
            mock_create.return_value = existing_row

        async_result = MagicMock(task_id="celery-task-id")
        mock_attack_paths_task.return_value = async_result

        _perform_scan_complete_tasks("tenant-id", "scan-id", "provider-id")

        mock_retrieve.assert_called_once_with("tenant-id", "scan-id")
        if row_pre_existing:
            mock_create.assert_not_called()
        else:
            mock_create.assert_called_once_with("tenant-id", "scan-id", "provider-id")

        mock_attack_paths_task.assert_called_once_with(
            kwargs={"tenant_id": "tenant-id", "scan_id": "scan-id"}
        )

        mock_set_task_id.assert_called_once_with(
            "tenant-id", "ap-scan-id", "celery-task-id"
        )


class TestAttackPathsTasks:
    @staticmethod

@@ -73,6 +73,58 @@ The best reference to understand how to implement a new service is following the
- AWS API calls are wrapped in try/except blocks, with specific handling for `ClientError` and generic exceptions, always logging errors.
- If ARN is not present for some resource, it can be constructed using string interpolation, always including partition, service, region, account, and resource ID.
- Tags and additional attributes that cannot be retrieved from the default call should be collected and stored for each resource using dedicated methods, threaded over the resource object list as the iterator.
- When accessing dictionary values from AWS API responses, always use `.get()` with a default value instead of direct dictionary access (e.g., `response.get("Policies", {})` instead of `response["Policies"]`). AWS API responses may not always include all keys, and direct access can cause `KeyError` exceptions that break the entire scan for that service. Both the ARN and `.get()` patterns are sketched below.

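The following minimal, self-contained sketch illustrates both patterns together. It is an illustration only: the `widgets` service, response shape, region, and account ID are hypothetical stand-ins, not a real Prowler service.

```python
# Hedged sketch only: "widgets", the response shape, the region and the
# account ID are hypothetical, not a real Prowler service or API.
response = {"Widgets": [{"Id": "w-1"}, {}]}  # second item is missing "Id"

widgets = []
for widget in response.get("Widgets", []):  # .get() guards a missing key
    widget_id = widget.get("Id", "")
    # The API does not return an ARN, so construct one with string
    # interpolation: partition, service, region, account, resource ID.
    arn = f"arn:aws:widgets:eu-west-1:123456789012:widget/{widget_id}"
    widgets.append({"id": widget_id, "arn": arn})

print(widgets)
```
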
### Extending an Existing Service with New Attributes

When adding a new check that requires data not yet collected by an existing service, you need to extend the service by adding new attributes to its resource models and updating the data collection methods. This is a common contributor task that follows a consistent pattern:

1. **Identify the missing data**: Determine which AWS API call provides the data you need and whether it's already being called by the service.

2. **Add new attributes to the resource model**: Extend the Pydantic `BaseModel` class for the resource with the new fields. Use `Optional` types with `None` as the default value to maintain backward compatibility with existing checks.

3. **Update the data collection method**: Modify the existing method that fetches resource details to also extract and store the new attributes. If no existing method fetches the data, add a new method and call it in the constructor using `self.__threading_call__` if possible.

4. **Use safe dictionary access**: When extracting values from API responses, always use `.get()` with appropriate defaults to prevent `KeyError` exceptions when the API doesn't return certain fields.

#### Example: Adding DKIM Status to SES Identities

```python
# Step 1 & 2: Add new fields to the resource model
class Identity(BaseModel):
    name: str
    arn: str
    region: str
    type: Optional[str]
    policy: Optional[dict] = None
    tags: Optional[list] = []
    # New attributes for DKIM check
    dkim_status: Optional[str] = None
    dkim_signing_attributes_origin: Optional[str] = None


# Step 3: Update the data collection method
def _get_email_identities(self, identity):
    try:
        regional_client = self.regional_clients[identity.region]
        identity_attributes = regional_client.get_email_identity(
            EmailIdentity=identity.name
        )
        # Step 4: Use .get() for safe dictionary access
        for content_key, content_value in identity_attributes.get("Policies", {}).items():
            identity.policy = loads(content_value)
        identity.tags = identity_attributes.get("Tags", [])
        # Extract new DKIM attributes
        identity.dkim_status = identity_attributes.get("DkimStatus")
        identity.dkim_signing_attributes_origin = (
            identity_attributes.get("DkimSigningAttributesOrigin")
        )
    except Exception as error:
        logger.error(
            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
```

5. **Update the service tests**: Add the new attributes to the test mock data and assertions to verify correct data extraction. A minimal sketch follows.

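A minimal, self-contained sketch of step 5, assuming the hypothetical DKIM fields from the example above (real service tests mock the botocore client and use shared fixtures instead):

```python
# Hedged sketch: simulate the mocked get_email_identity response a service
# test would use, and assert the new DKIM fields are extracted safely.
mocked_identity_attributes = {
    "DkimStatus": "SUCCESS",
    "DkimSigningAttributesOrigin": "AWS_SES",
    # "Policies" and "Tags" intentionally omitted: .get() must cope.
}

dkim_status = mocked_identity_attributes.get("DkimStatus")
dkim_origin = mocked_identity_attributes.get("DkimSigningAttributesOrigin")
policies = mocked_identity_attributes.get("Policies", {})

assert dkim_status == "SUCCESS"
assert dkim_origin == "AWS_SES"
assert policies == {}
```
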
## Specific Patterns in AWS Checks

@@ -215,3 +215,6 @@ Also is important to keep all code examples as short as possible, including the
| e5 | M365 and Azure Entra checks enabled by or dependent on an E5 license (e.g., advanced threat protection, audit, DLP, and eDiscovery) |
| privilege-escalation | Detects IAM policies or permissions that allow identities to elevate their privileges beyond their intended scope, potentially gaining administrator or higher-level access through specific action combinations |
| ec2-imdsv1 | Identifies EC2 instances using Instance Metadata Service version 1 (IMDSv1), which is vulnerable to SSRF attacks and should be replaced with IMDSv2 for enhanced security |
| vercel-hobby-plan | Vercel checks whose audited feature is available on the Hobby plan (and therefore also on Pro and Enterprise plans) |
| vercel-pro-plan | Vercel checks whose audited feature requires a Pro plan or higher, including features also available on Enterprise or via supported paid add-ons for Pro plans |
| vercel-enterprise-plan | Vercel checks whose audited feature requires the Enterprise plan |

@@ -27,14 +27,28 @@ The most common high level steps to create a new check are:

### Naming Format for Checks

-Checks must be named following the format: `service_subservice_resource_action`.
+If you already know the check name when creating a request or implementing a check, use a descriptive identifier with lowercase letters and underscores only.

Recommended patterns:

- `<service>_<resource>_<best_practice>`

The name components are:

-- `service` – The main service being audited (e.g., ec2, entra, iam, etc.)
-- `subservice` – An individual component or subset of functionality within the service that is being audited. This may correspond to a shortened version of the class attribute accessed within the check. If there is no subservice, just omit.
-- `resource` – The specific resource type being evaluated (e.g., instance, policy, role, etc.)
-- `action` – The security aspect or configuration being checked (e.g., public, encrypted, enabled, etc.)
+- `service` – The main service or product area being audited (e.g., ec2, entra, iam, bedrock).
+- `resource` – The resource, feature, or configuration being evaluated. It can be a single word or a compound phrase joined with underscores (e.g., instance, policy, guardrail, sensitive_information_filter).
+- `best_practice` – The expected secure state or best practice being checked (e.g., enabled, encrypted, restricted, configured, not_publicly_accessible).

Additional guidance:

- Use underscores only. Do not use hyphens.
- Keep the name specific enough to describe the behavior of the check.
- The first segment should match the service or product area whenever possible.

Examples:

- `s3_bucket_versioning_enabled`
- `bedrock_guardrail_sensitive_information_filter_enabled`

### File Creation

@@ -387,7 +401,7 @@ Provides both code examples and best practice recommendations for addressing the

#### Categories

-One or more functional groupings used for execution filtering (e.g., `internet-exposed`). You can define new categories just by adding to this field.
+One or more functional groupings used for execution filtering (e.g., `internet-exposed`). Categories must match the predefined values enforced by `CheckMetadata`; adding a new category requires updating the validator and the metadata documentation.

For the complete list of available categories, see [Categories Guidelines](/developer-guide/check-metadata-guidelines#categories-guidelines).
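As an illustration only (the authoritative validator and the allowed set live in `CheckMetadata` inside Prowler; the category names below are a hypothetical subset):

```python
# Hedged sketch of category validation; ALLOWED_CATEGORIES is a
# hypothetical subset, not Prowler's real list.
ALLOWED_CATEGORIES = {"internet-exposed", "encryption", "logging"}

def validate_categories(categories: list[str]) -> list[str]:
    unknown = sorted(set(categories) - ALLOWED_CATEGORIES)
    if unknown:
        raise ValueError(f"Unknown categories: {unknown}")
    return categories

validate_categories(["internet-exposed"])   # passes
# validate_categories(["made-up-category"]) # would raise ValueError
```
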
@@ -134,6 +134,22 @@ prek installed at `.git/hooks/pre-commit`
If pre-commit hooks were previously installed, run `prek install --overwrite` to replace the existing hook. Otherwise, both tools will run on each commit.
</Warning>

#### Enable TruffleHog as a Pre-Push Hook

By default, only `pre-commit` hooks are installed. To enable [`TruffleHog`](https://github.com/trufflesecurity/trufflehog) secret scanning on every push, install the `pre-push` hook type explicitly:

```shell
prek install --hook-type pre-push
```

Successful installation should produce the following output:

```shell
prek installed at `.git/hooks/pre-push`
```

Once installed, TruffleHog runs before each push and blocks the operation when verified secrets are detected.

### Code Quality and Security Checks

Before merging pull requests, several automated checks and utilities ensure code security and updated dependencies:

@@ -1003,7 +1003,7 @@ class ProwlerArgumentParser:
            formatter_class=RawTextHelpFormatter,
            usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,nhn,dashboard,iac,your_provider} ...",
            epilog="""
-Available Cloud Providers:
+Available Providers:
{aws,azure,gcp,kubernetes,m365,github,iac,nhn,your_provider}
  aws                 AWS Provider
  azure               Azure Provider

@@ -0,0 +1,131 @@
---
title: 'Prowler Studio'
---

**Prowler Studio is an AI workflow that ensures Claude Code follows Prowler's skills, guardrails, and best practices when creating new security checks.** What lands in the resulting pull request is consistent, tested, and ready for human review — not half-correct boilerplate that needs to be rewritten.

<Info>
**Contributor Tool**: Prowler Studio is a workflow for advanced contributors adding new Prowler security checks. It is not part of Prowler Cloud, Prowler App, or Prowler CLI.
</Info>

<Warning>
**Preview Feature**: Prowler Studio is under active development and breaking changes are expected. Please report issues or share feedback on [GitHub](https://github.com/prowler-cloud/prowler-studio/issues) or in the [Slack community](https://goto.prowler.com/slack).
</Warning>

<Card title="Prowler Studio Repository" icon="github" href="https://github.com/prowler-cloud/prowler-studio" horizontal>
Clone the source code, install Prowler Studio, and explore the agent workflow in detail.
</Card>

## The Problem

Adding a new check to [Prowler](https://github.com/prowler-cloud/prowler) is more than writing detection logic. A correct check has to:

- Match Prowler's exact service and check folder structure and naming conventions
- Wire up metadata, severity, remediation, tests, and compliance mappings
- Mirror the patterns used by the hundreds of existing checks in the same provider
- Actually load when Prowler scans for available checks — silent structural mistakes are easy to make

Asking a general-purpose AI assistant to do this usually means guessing. It misses conventions, skips tests, or invents structure that looks right but does not load. The result is a half-correct PR that needs to be reviewed line by line or rewritten.

## The Solution

Prowler Studio enforces the workflow end-to-end. Describe the check once — a markdown ticket, a Jira issue, or a GitHub issue — and the workflow:

1. **Loads Prowler-specific skills into every agent.** Every step starts with the same context an experienced Prowler engineer would have in mind. See [AI Skills System](/developer-guide/ai-skills) for how skills are structured.
2. **Runs specialized agents in sequence.** Implementation → testing → compliance mapping → review → PR creation. Each agent has one job and a tight scope.
3. **Verifies as it goes.** The check must load in Prowler. Tests must pass. If something fails, the agent fixes it and re-runs (up to a bounded number of attempts) before moving on.
4. **Produces a complete pull request.** Branch, passing check, tests, compliance mappings, and a pull request waiting for human review.

The result is a consistent starting point, every time, on every supported provider.

## Quick Start

### Install

Prowler Studio requires [`uv`](https://docs.astral.sh/uv/getting-started/installation/) — see the official installation guide.

```bash
git clone https://github.com/prowler-cloud/prowler-studio
cd prowler-studio
uv sync
source .venv/bin/activate
```

### Describe the Check

A ticket is a structured markdown description of the check to create. It is the only input the workflow needs; every agent (implementation, testing, compliance mapping, review, PR creation) uses it as the source of truth, so the more concrete it is, the closer the first PR will land to the desired outcome.

The ticket can be supplied in three ways:

- **Local markdown file** → `--ticket path/to/ticket.md`
- **Jira issue** → `--jira-url https://...` (uses the issue body)
- **GitHub issue** → `--github-url https://...` (uses the issue body)

The content should follow the **New Check Request** template:

- The local copy at [`check_ticket_template.md`](https://github.com/prowler-cloud/prowler-studio/blob/main/check_ticket_template.md) covers `--ticket` and Jira tickets.
- A prefilled GitHub form is also available: [Create a New Check Request issue](https://github.com/prowler-cloud/prowler/issues/new?template=new-check-request.yml).

Sections marked *Optional* can be skipped; everything else helps the agents make the right decisions.

### Run the Workflow

From a local markdown ticket:

```bash
prowler-studio --ticket check_ticket.md
```

From a Jira ticket:

```bash
prowler-studio --jira-url https://mycompany.atlassian.net/browse/PROJ-123
```

From a GitHub issue:

```bash
prowler-studio --github-url https://github.com/owner/repo/issues/123
```

<Note>
Provide exactly one of `--ticket`, `--jira-url`, or `--github-url`.
</Note>

Keep changes local (no push, no pull request):

```bash
prowler-studio -b feat/my-check --ticket check_ticket.md --local
```

### What You Get

After a successful run the working environment contains:

- A new branch on a clean Prowler worktree containing the check, metadata, tests, and compliance mappings
- A pull request opened against Prowler (skipped with `--local`)
- A timestamped log file under `logs/` capturing every step the agents took

## CLI Options

| Option | Short | Description |
|--------|-------|-------------|
| `--branch` | `-b` | Branch name (default: `feat/<ticket>-<check_name>` or `feat/<check_name>`) |
| `--ticket` | `-t` | Path to a markdown check ticket file |
| `--jira-url` | `-j` | Jira ticket URL (e.g., `https://mycompany.atlassian.net/browse/PROJ-123`) |
| `--github-url` | `-g` | GitHub issue URL (e.g., `https://github.com/owner/repo/issues/123`) |
| `--working-dir` | `-w` | Working directory for the Prowler clone (default: `./working`) |
| `--no-worktree` | | Legacy mode — work directly on the main clone instead of using worktrees |
| `--cleanup-worktree` | | Remove the worktree after a successful pull request is created |
| `--local` | | Keep changes local — skip push and pull request creation |

## Configuration

Set these environment variables depending on the input source:

| Variable | When Needed | Purpose |
|----------|-------------|---------|
| `GITHUB_TOKEN` | `--github-url` (recommended) | Higher GitHub API rate limits and access to private issues |
| `JIRA_SITE_URL` | `--jira-url` | Jira site, e.g. `https://mycompany.atlassian.net` |
| `JIRA_EMAIL` | `--jira-url` | Email of the Jira account used to fetch the ticket |
| `JIRA_API_TOKEN` | `--jira-url` | API token for the Jira account |

@@ -365,7 +365,8 @@
            "developer-guide/security-compliance-framework",
            "developer-guide/lighthouse-architecture",
            "developer-guide/mcp-server",
-           "developer-guide/ai-skills"
+           "developer-guide/ai-skills",
+           "developer-guide/prowler-studio"
        ]
    },
    {

@@ -32,11 +32,11 @@ Access Prowler App by logging in with **email and password**.

<img src="/images/log-in.png" alt="Log In" width="285" />

-## Add Cloud Provider
+## Add Provider

-Configure a cloud provider for scanning:
+Configure a provider for scanning:

-1. Navigate to `Settings > Cloud Providers` and click `Add Account`.
+1. Navigate to `Settings > Providers` and click `Add Provider`.
2. Select the cloud provider.
3. Enter the provider's identifier (Optional: Add an alias):
   - **AWS**: Account ID

@@ -121,8 +121,8 @@ To update the environment file:
Edit the `.env` file and change version values:

```env
-PROWLER_UI_VERSION="5.24.0"
-PROWLER_API_VERSION="5.24.0"
+PROWLER_UI_VERSION="5.25.3"
+PROWLER_API_VERSION="5.25.3"
```

<Note>

@@ -159,6 +159,40 @@ When these environment variables are set, the API will use them directly instead
A fix addressing this permission issue is being evaluated in [PR #9953](https://github.com/prowler-cloud/prowler/pull/9953).
</Note>

### Scan Stuck in Executing State After Worker Crash

When running Prowler App via Docker Compose, a scan may remain indefinitely in the `executing` state if the worker process crashes (for example, due to an Out of Memory condition) before it can update the scan status. Since it is not currently possible to cancel a scan in `executing` state through the UI, the workaround is to manually update the scan record in the database.

**Root Cause:**

The Celery worker process terminates unexpectedly (OOM, node failure, etc.) before transitioning the scan state to `completed` or `failed`. The scan record remains in `executing` with no active process to advance it.

**Solution:**

Connect to the database using the `prowler_admin` user. Due to Row-Level Security (RLS), the default database user cannot see scan records — you must use `prowler_admin`:

```bash
psql -U prowler_admin -d prowler_db
```

Identify the stuck scan by filtering for scans in `executing` state:

```sql
SELECT id, name, state, started_at FROM scans WHERE state = 'executing';
```

Update the scan state to `failed` using the scan ID:

```sql
UPDATE scans SET state = 'failed' WHERE id = '<scan-id>';
```

After this change, the scan will appear as failed in the UI and you can launch a new scan.

<Note>
A feature to cancel executing scans directly from the UI is being tracked in [GitHub Issue #6893](https://github.com/prowler-cloud/prowler/issues/6893).
</Note>

### SAML/OAuth ACS URL Incorrect When Running Behind a Proxy or Load Balancer

See [GitHub Issue #9724](https://github.com/prowler-cloud/prowler/issues/9724) for more details.

@@ -1,47 +0,0 @@
|
||||
---
|
||||
title: 'Prowler Check Kreator'
|
||||
---
|
||||
|
||||
<Note>
|
||||
Currently, this tool is only available for creating checks for the AWS provider.
|
||||
|
||||
</Note>
|
||||
<Note>
|
||||
If you are looking for a way to create new checks for all the supported providers, you can use [Prowler Studio](https://github.com/prowler-cloud/prowler-studio), it is an AI-powered toolkit for generating and managing security checks for Prowler (better version of the Check Kreator).
|
||||
|
||||
</Note>
|
||||
## Introduction
|
||||
|
||||
**Prowler Check Kreator** is a utility designed to streamline the creation of new checks for Prowler. This tool generates all necessary files required to add a new check to the Prowler repository. Specifically, it creates:
|
||||
|
||||
- A dedicated folder for the check.
|
||||
- The main check script.
|
||||
- A metadata file with essential details.
|
||||
- A folder and file structure for testing the check.
|
||||
|
||||
## Usage
|
||||
|
||||
To use the tool, execute the main script with the following command:
|
||||
|
||||
```bash
|
||||
python util/prowler_check_kreator/prowler_check_kreator.py <prowler_provider> <check_name>
|
||||
```
|
||||
|
||||
Parameters:
|
||||
|
||||
- `<prowler_provider>`: Currently only AWS is supported.
|
||||
- `<check_name>`: The name you wish to assign to the new check.
|
||||
|
||||
## AI integration
|
||||
|
||||
This tool optionally integrates AI to assist in generating the check code and metadata file content. When AI assistance is chosen, the tool uses [Gemini](https://gemini.google.com/) to produce preliminary code and metadata.
|
||||
|
||||
<Note>
|
||||
For this feature to work, you must have the library `google-generativeai` installed in your Python environment.
|
||||
|
||||
</Note>
|
||||
<Warning>
|
||||
AI-generated code and metadata might contain errors or require adjustments to align with specific Prowler requirements. Carefully review all AI-generated content before committing.
|
||||
|
||||
</Warning>
|
||||
To enable AI assistance, simply confirm when prompted by the tool. Additionally, ensure that the `GEMINI_API_KEY` environment variable is set with a valid Gemini API key. For instructions on obtaining your API key, refer to the [Gemini documentation](https://ai.google.dev/gemini-api/docs/api-key).
|
||||
@@ -40,13 +40,13 @@ Before you begin, make sure you have:
### Step 2: Access Prowler Cloud

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
-2. Go to "Configuration" > "Cloud Providers"
+2. Go to "Configuration" > "Providers"

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider"
+3. Click "Add Provider"

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Alibaba Cloud"


@@ -19,13 +19,13 @@ title: 'Getting Started With AWS on Prowler'
### Step 2: Access Prowler Cloud

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
-2. Go to "Configuration" > "Cloud Providers"
+2. Go to "Configuration" > "Providers"

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider"
+3. Click "Add Provider"

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Amazon Web Services"


@@ -35,13 +35,13 @@ For detailed instructions on how to create the Service Principal and configure p
### Step 2: Access Prowler Cloud

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
-2. Navigate to `Configuration` > `Cloud Providers`
+2. Navigate to `Configuration` > `Providers`

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click on `Add Cloud Provider`
+3. Click on `Add Provider`

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select `Microsoft Azure`


@@ -42,13 +42,13 @@ The Account ID is a 32-character hexadecimal string (e.g., `372e67954025e0ba6aaa
### Step 2: Open Prowler Cloud

1. Go to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app).
-2. Navigate to "Configuration" > "Cloud Providers".
+2. Navigate to "Configuration" > "Providers".

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider".
+3. Click "Add Provider".

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Cloudflare".


@@ -14,13 +14,13 @@ title: 'Getting Started With GCP on Prowler'
### Step 2: Access Prowler Cloud

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
-2. Go to "Configuration" > "Cloud Providers"
+2. Go to "Configuration" > "Providers"

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider"
+3. Click "Add Provider"

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Google Cloud Platform"


@@ -275,7 +275,7 @@ For step-by-step setup instructions for Prowler Cloud, see the [Getting Started

### Using Personal Access Token

-1. In Prowler Cloud, navigate to **Configuration** > **Cloud Providers** > **Add Cloud Provider** > **GitHub**.
+1. In Prowler Cloud, navigate to **Configuration** > **Providers** > **Add Provider** > **GitHub**.

2. Enter your GitHub Account ID (username or organization name).

@@ -49,13 +49,13 @@ Before adding GitHub to Prowler Cloud/App, ensure you have:
### Step 1: Access Prowler Cloud/App

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
-2. Go to **Configuration** → **Cloud Providers**
+2. Go to **Configuration** → **Providers**

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click **Add Cloud Provider**
+3. Click **Add Provider**

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select **GitHub**


@@ -43,13 +43,13 @@ The Customer ID starts with the letter "C" followed by alphanumeric characters (
### Step 2: Open Prowler Cloud

1. Go to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app).
-2. Navigate to "Configuration" > "Cloud Providers".
+2. Navigate to "Configuration" > "Providers".

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider".
+3. Click "Add Provider".

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Google Workspace".


@@ -42,13 +42,13 @@ Scanner selection is not configurable in Prowler App. Default scanners, misconfi
### Step 1: Access Prowler Cloud/App

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
-2. Go to "Configuration" > "Cloud Providers"
+2. Go to "Configuration" > "Providers"

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider"
+3. Click "Add Provider"

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Infrastructure as Code"


@@ -34,13 +34,13 @@ Prowler Cloud does not support scanner selection. The vulnerability, secret, and
### Step 1: Access Prowler Cloud

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
-2. Navigate to "Configuration" > "Cloud Providers"
+2. Navigate to "Configuration" > "Providers"

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider"
+3. Click "Add Provider"

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Container Registry"


@@ -7,13 +7,13 @@ title: 'Getting Started with Kubernetes'
### Step 1: Access Prowler Cloud/App

1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app)
-2. Go to "Configuration" > "Cloud Providers"
+2. Go to "Configuration" > "Providers"

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider"
+3. Click "Add Provider"

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Kubernetes"


@@ -42,13 +42,13 @@ Set up authentication for Microsoft 365 with the [Microsoft 365 Authentication](
### Step 2: Open Prowler Cloud

1. Go to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app).
-2. Navigate to "Configuration" > "Cloud Providers".
+2. Navigate to "Configuration" > "Providers".

-![Cloud Providers Page](/images/cloud-providers-page.png)
+![Providers Page](/images/cloud-providers-page.png)

-3. Click "Add Cloud Provider".
+3. Click "Add Provider".

-![Add a Cloud Provider](/images/add-cloud-provider.png)
+![Add a Provider](/images/add-cloud-provider.png)

4. Select "Microsoft 365".


@@ -38,7 +38,7 @@ If **Require IP Access List for the Atlas Administration API** is enabled in you

### Step 1: Add the provider

-1. Navigate to **Cloud Providers** and click **Add Cloud Provider**.
+1. Navigate to **Providers** and click **Add Provider**.
![Add provider](/images/mongodbatlas/add-provider.png)
2. Select **MongoDB Atlas** from the provider list.
3. Enter your **Organization ID** (24 hex characters). This value is visible in the Atlas UI under **Organization Settings**.

@@ -16,8 +16,8 @@ The following steps apply to Prowler Cloud and the self-hosted Prowler App.

### Step 2: Access Prowler Cloud
1. Navigate to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app).
2. Go to **Configuration** → **Cloud Providers** and click **Add Cloud Provider**.
![Cloud Providers Page](/img/oracle/providers-page.png)
2. Go to **Configuration** → **Providers** and click **Add Provider**.
![Providers Page](/img/oracle/providers-page.png)
3. Select **Oracle Cloud** and enter the **Tenancy OCID** and an optional alias, then choose **Next**.
![Select Oracle Cloud](/img/oracle/select-oracle.png)

@@ -34,7 +34,7 @@ Before running Prowler with the OpenStack provider, ensure you have:

### Step 1: Add the Provider

1. Navigate to "Cloud Providers" and click "Add Cloud Provider".
1. Navigate to "Providers" and click "Add Provider".
![Add Cloud Provider](/img/openstack/add-provider.png)
2. Select "OpenStack" from the provider list.
3. Enter the "Project ID" from the OpenStack provider.

@@ -29,13 +29,13 @@ Set up authentication for Vercel with the [Vercel Authentication](/user-guide/pr
### Step 1: Add the Provider

1. Go to [Prowler Cloud](https://cloud.prowler.com/) or launch [Prowler App](/user-guide/tutorials/prowler-app).
2. Navigate to "Configuration" > "Cloud Providers".
2. Navigate to "Configuration" > "Providers".

![Vercel Cloud Providers Page](/img/add-cloud-provider.png)
![Vercel Providers Page](/img/add-cloud-provider.png)

3. Click "Add Cloud Provider".
3. Click "Add Provider".

![Add Cloud Provider](/img/select-provider.png)
![Add Provider](/img/select-provider.png)

4. Select "Vercel".

@@ -160,3 +160,25 @@ Prowler for Vercel includes security checks across the following services:
| **Project** | Deployment protection, environment variable security, fork protection, and skew protection |
| **Security** | Web Application Firewall (WAF), rate limiting, IP blocking, and managed rulesets |
| **Team** | SSO enforcement, directory sync, member access, and invitation hygiene |

## Checks With Explicit Plan-Based Behavior

Prowler currently includes 26 Vercel checks. The 11 checks below have explicit billing-plan handling in the provider metadata or check logic. When the scanned scope reports a billing plan, Prowler adds plan-aware context to findings for these checks. If the API does not expose the required configuration, Prowler may return `MANUAL` and require verification in the Vercel dashboard.

| Check ID | Hobby | Pro | Enterprise | Notes |
|----------|-------|-----|------------|-------|
| `project_password_protection_enabled` | Not available | Available as a paid add-on | Available | Checks password protection for deployments |
| `project_production_deployment_protection_enabled` | Not available | Available with supported paid deployment protection options | Available | Checks protection for production deployments |
| `project_skew_protection_enabled` | Not available | Available | Available | Checks skew protection during rollouts |
| `security_custom_rules_configured` | Not available | Available | Available | Returns `MANUAL` when the firewall configuration cannot be assessed from the API |
| `security_ip_blocking_rules_configured` | Not available | Available | Available | Returns `MANUAL` when the firewall configuration cannot be assessed from the API |
| `team_saml_sso_enabled` | Not available | Available | Available | Checks team SAML SSO configuration |
| `team_saml_sso_enforced` | Not available | Available | Available | Checks SAML SSO enforcement for all team members |
| `team_directory_sync_enabled` | Not available | Not available | Available | Checks SCIM directory sync |
| `security_managed_rulesets_enabled` | Bot Protection and AI Bots managed rulesets | Bot Protection and AI Bots managed rulesets | All managed rulesets, including OWASP Core Ruleset | Returns `MANUAL` when the firewall configuration cannot be assessed from the API |
| `security_rate_limiting_configured` | Not available | Available | Available | Returns `MANUAL` when the firewall configuration cannot be assessed from the API |
| `security_waf_enabled` | Not available | Available | Available | Returns `MANUAL` when the firewall configuration cannot be assessed from the API |

<Note>
The five firewall-related checks (`security_waf_enabled`, `security_custom_rules_configured`, `security_ip_blocking_rules_configured`, `security_rate_limiting_configured`, and `security_managed_rulesets_enabled`) return `MANUAL` when the firewall configuration endpoint is not accessible from the API. The other 15 current Vercel checks do not currently include plan-specific handling in provider logic, but every Vercel check includes exactly one billing-plan metadata category (`vercel-hobby-plan`, `vercel-pro-plan`, or `vercel-enterprise-plan`) alongside its functional security category.
</Note>
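
As a quick way to exercise the plan-dependent behavior against your own account, you can run the firewall-related checks in isolation and look for `MANUAL` findings. A minimal sketch, assuming the Vercel provider accepts the same check-selection flags as the other Prowler providers (confirm the exact flag names with `prowler vercel --help`):

```bash
# List the available Vercel checks (flag assumed from other providers).
prowler vercel --list-checks

# Run only the firewall-related checks. MANUAL findings indicate the
# firewall configuration could not be assessed from the API and must be
# verified in the Vercel dashboard.
prowler vercel --check security_waf_enabled \
  security_custom_rules_configured \
  security_ip_blocking_rules_configured \
  security_rate_limiting_configured \
  security_managed_rulesets_enabled
```
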
@@ -123,7 +123,7 @@ The Roles section in Prowler is designed to facilitate the assignment of custom
</Note>
### Provider Groups

Provider Groups control visibility across specific providers. When creating a new role, you can assign specific groups to define their Cloud Provider visibility. This ensures that users with that role have access only to the Cloud Providers that are required.
Provider Groups control visibility across specific providers. When creating a new role, you can assign specific groups to define their Provider visibility. This ensures that users with that role have access only to the Providers that are required.

By default, a new user role does not have visibility into any group.

@@ -223,10 +223,11 @@ Assign administrative permissions by selecting from the following options:
| Invite and Manage Users | All | Invite new users and manage existing ones. |
| Manage Account | All | Adjust account settings, delete users, and read/manage user permissions. |
| Manage Scans | All | Run and review scans. |
| Manage Cloud Providers | All | Add or modify connected cloud providers. |
| Manage Providers | All | Add or modify connected providers. |
| Manage Integrations | All | Add or modify the Prowler Integrations. |
| Manage Ingestions | Prowler Cloud | Allow or deny the ability to submit findings ingestion batches via the API. |
| Manage Billing | Prowler Cloud | Access and manage billing settings and subscription information. |
| Manage Alerts | Prowler Cloud | Create, edit, and delete alert rules and recipients. |

<Note>
The **Scope** column indicates where each permission applies. **All** means the permission is available in both Prowler Cloud and Self-Managed deployments. **Prowler Cloud** indicates permissions that are specific to [Prowler Cloud](https://cloud.prowler.com/sign-in).
@@ -241,3 +242,5 @@ The following permissions are available exclusively in **Prowler Cloud**:
**Manage Ingestions:** Submit and manage findings ingestion jobs via the API. Required to upload OCSF scan results using the `--push-to-cloud` CLI flag or the ingestion endpoints. See [Import Findings](/user-guide/tutorials/prowler-app-import-findings) for details.
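
For example, a user whose role grants this permission could run a scan and push the resulting OCSF findings to Prowler Cloud with something like the following (the provider and flag combination are illustrative; see the Import Findings guide for the exact syntax):

```bash
# Scan an AWS account and submit the OCSF results as an ingestion batch.
# Requires a role with the Manage Ingestions permission.
prowler aws --push-to-cloud
```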

**Manage Billing:** Access and manage billing settings, subscription plans, and payment methods.

**Manage Alerts:** Create, edit, and delete alert rules and recipients used to deliver scan-result digests via email.

@@ -320,7 +320,7 @@ Once the required permissions are set up, proceed to configure the S3 integratio
![S3 Integration Configuration](/img/s3-integration-form.png)
4. Complete the configuration form with the following details:

- **Cloud Providers:** Select the providers whose scan results should be exported to this S3 bucket
- **Providers:** Select the providers whose scan results should be exported to this S3 bucket
- **Bucket Name:** Enter the name of the target S3 bucket (e.g., `my-security-findings-bucket`)
- **Output Directory:** Specify the directory path within the bucket (e.g., `/prowler-findings/`, defaults to `output`)
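
With the example values above, the exported findings can then be inspected directly in the bucket (paths illustrative; exact file names depend on the configured output formats):

```bash
# List the findings Prowler exported to the example bucket and directory.
aws s3 ls s3://my-security-findings-bucket/prowler-findings/ --recursive
```
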
@@ -72,8 +72,8 @@ To perform security scans, link a cloud provider account. Prowler supports the f

Steps to add a provider:

1. Navigate to `Settings > Cloud Providers`.
2. Click `Add Account` to set up a new provider and provide your credentials.
1. Navigate to `Settings > Providers`.
2. Click `Add Provider` to set up a new provider and provide your credentials.

<img src="/images/add-provider.png" alt="Add Provider" width="700" />

@@ -1,51 +0,0 @@
---
title: 'Prowler Check Kreator'
---

<Note>
Currently, this tool is only available for creating checks for the AWS provider.

</Note>

<Note>
If you are looking for a way to create new checks for all the supported providers, you can use [Prowler Studio](https://github.com/prowler-cloud/prowler-studio), an AI-powered toolkit for generating and managing security checks for Prowler and an improved successor to the Check Kreator.

</Note>

## Introduction

**Prowler Check Kreator** is a utility designed to streamline the creation of new checks for Prowler. This tool generates all necessary files required to add a new check to the Prowler repository. Specifically, it creates:

- A dedicated folder for the check.
- The main check script.
- A metadata file with essential details.
- A folder and file structure for testing the check.

## Usage

To use the tool, execute the main script with the following command:

```bash
python util/prowler_check_kreator/prowler_check_kreator.py <prowler_provider> <check_name>
```

Parameters:

- `<prowler_provider>`: Currently only AWS is supported.
- `<check_name>`: The name you wish to assign to the new check.
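
For example, to scaffold a hypothetical AWS check (the check name below is made up for illustration):

```bash
# Generates the check folder, script, metadata file, and test skeleton.
python util/prowler_check_kreator/prowler_check_kreator.py aws ec2_example_security_check
```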

## AI integration

This tool optionally integrates AI to assist in generating the check code and metadata file content. When AI assistance is chosen, the tool uses [Gemini](https://gemini.google.com/) to produce preliminary code and metadata.

<Note>
For this feature to work, you must have the library `google-generativeai` installed in your Python environment.

</Note>

<Warning>
AI-generated code and metadata might contain errors or require adjustments to align with specific Prowler requirements. Carefully review all AI-generated content before committing.

</Warning>

To enable AI assistance, simply confirm when prompted by the tool. Additionally, ensure that the `GEMINI_API_KEY` environment variable is set with a valid Gemini API key. For instructions on obtaining your API key, refer to the [Gemini documentation](https://ai.google.dev/gemini-api/docs/api-key).
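
For example, on Linux or macOS (a sketch; the key value is a placeholder and the check name is illustrative):

```bash
# Export a valid Gemini API key, then run the tool and confirm the
# AI-assistance prompt when it appears.
export GEMINI_API_KEY="your-gemini-api-key"
python util/prowler_check_kreator/prowler_check_kreator.py aws ec2_example_security_check
```
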
@@ -246,10 +246,10 @@ Now that both roles are deployed — the management account role (Step 1) and th

### Open the Wizard

1. Navigate to **Cloud Providers** and click **Add Cloud Provider**.
1. Navigate to **Providers** and click **Add Provider**.

<Frame>
<img src="/images/organizations/cloud-providers-add.png" alt="Cloud Providers page showing the Add Cloud Provider button" />
<img src="/images/organizations/cloud-providers-add.png" alt="Providers page showing the Add Provider button" />
</Frame>

2. Select **Amazon Web Services** as the provider.

@@ -2,6 +2,14 @@

All notable changes to the **Prowler MCP Server** are documented in this file.

## [0.7.0] (Prowler UNRELEASED)

### 🔐 Security

- `cryptography` from 46.0.1 to 47.0.0 (transitive) for CVE-2026-39892 and CVE-2026-26007 / CVE-2026-34073 [(#10978)](https://github.com/prowler-cloud/prowler/pull/10978)

---

## [0.6.0] (Prowler v5.23.0)

### 🚀 Added

Generated: +47 -50
@@ -204,58 +204,55 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "cryptography"
|
||||
version = "46.0.1"
|
||||
version = "47.0.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "cffi", marker = "platform_python_implementation != 'PyPy'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a9/62/e3664e6ffd7743e1694b244dde70b43a394f6f7fbcacf7014a8ff5197c73/cryptography-46.0.1.tar.gz", hash = "sha256:ed570874e88f213437f5cf758f9ef26cbfc3f336d889b1e592ee11283bb8d1c7", size = 749198, upload-time = "2025-09-17T00:10:35.797Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ef/b2/7ffa7fe8207a8c42147ffe70c3e360b228160c1d85dc3faff16aaa3244c0/cryptography-47.0.0.tar.gz", hash = "sha256:9f8e55fe4e63613a5e1cc5819030f27b97742d720203a087802ce4ce9ceb52bb", size = 830863, upload-time = "2026-04-24T19:54:57.056Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/4c/8c/44ee01267ec01e26e43ebfdae3f120ec2312aa72fa4c0507ebe41a26739f/cryptography-46.0.1-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:1cd6d50c1a8b79af1a6f703709d8973845f677c8e97b1268f5ff323d38ce8475", size = 7285044, upload-time = "2025-09-17T00:08:36.807Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/22/59/9ae689a25047e0601adfcb159ec4f83c0b4149fdb5c3030cc94cd218141d/cryptography-46.0.1-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0ff483716be32690c14636e54a1f6e2e1b7bf8e22ca50b989f88fa1b2d287080", size = 4308182, upload-time = "2025-09-17T00:08:39.388Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/ee/ca6cc9df7118f2fcd142c76b1da0f14340d77518c05b1ebfbbabca6b9e7d/cryptography-46.0.1-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9873bf7c1f2a6330bdfe8621e7ce64b725784f9f0c3a6a55c3047af5849f920e", size = 4572393, upload-time = "2025-09-17T00:08:41.663Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7f/a3/0f5296f63815d8e985922b05c31f77ce44787b3127a67c0b7f70f115c45f/cryptography-46.0.1-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0dfb7c88d4462a0cfdd0d87a3c245a7bc3feb59de101f6ff88194f740f72eda6", size = 4308400, upload-time = "2025-09-17T00:08:43.559Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/8c/74fcda3e4e01be1d32775d5b4dd841acaac3c1b8fa4d0774c7ac8d52463d/cryptography-46.0.1-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e22801b61613ebdebf7deb18b507919e107547a1d39a3b57f5f855032dd7cfb8", size = 4015786, upload-time = "2025-09-17T00:08:45.758Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/b8/85d23287baeef273b0834481a3dd55bbed3a53587e3b8d9f0898235b8f91/cryptography-46.0.1-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:757af4f6341ce7a1e47c326ca2a81f41d236070217e5fbbad61bbfe299d55d28", size = 4982606, upload-time = "2025-09-17T00:08:47.602Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/d3/de61ad5b52433b389afca0bc70f02a7a1f074651221f599ce368da0fe437/cryptography-46.0.1-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f7a24ea78de345cfa7f6a8d3bde8b242c7fac27f2bd78fa23474ca38dfaeeab9", size = 4604234, upload-time = "2025-09-17T00:08:49.879Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/1f/dbd4d6570d84748439237a7478d124ee0134bf166ad129267b7ed8ea6d22/cryptography-46.0.1-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e8776dac9e660c22241b6587fae51a67b4b0147daa4d176b172c3ff768ad736", size = 4307669, upload-time = "2025-09-17T00:08:52.321Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/fd/ca0a14ce7f0bfe92fa727aacaf2217eb25eb7e4ed513b14d8e03b26e63ed/cryptography-46.0.1-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9f40642a140c0c8649987027867242b801486865277cbabc8c6059ddef16dc8b", size = 4947579, upload-time = "2025-09-17T00:08:54.697Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/6b/09c30543bb93401f6f88fce556b3bdbb21e55ae14912c04b7bf355f5f96c/cryptography-46.0.1-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:449ef2b321bec7d97ef2c944173275ebdab78f3abdd005400cc409e27cd159ab", size = 4603669, upload-time = "2025-09-17T00:08:57.16Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/9a/38cb01cb09ce0adceda9fc627c9cf98eb890fc8d50cacbe79b011df20f8a/cryptography-46.0.1-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2dd339ba3345b908fa3141ddba4025568fa6fd398eabce3ef72a29ac2d73ad75", size = 4435828, upload-time = "2025-09-17T00:08:59.606Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/53/435b5c36a78d06ae0bef96d666209b0ecd8f8181bfe4dda46536705df59e/cryptography-46.0.1-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7411c910fb2a412053cf33cfad0153ee20d27e256c6c3f14d7d7d1d9fec59fd5", size = 4709553, upload-time = "2025-09-17T00:09:01.832Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/c4/0da6e55595d9b9cd3b6eb5dc22f3a07ded7f116a3ea72629cab595abb804/cryptography-46.0.1-cp311-abi3-win32.whl", hash = "sha256:cbb8e769d4cac884bb28e3ff620ef1001b75588a5c83c9c9f1fdc9afbe7f29b0", size = 3058327, upload-time = "2025-09-17T00:09:03.726Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/95/0f/cd29a35e0d6e78a0ee61793564c8cff0929c38391cb0de27627bdc7525aa/cryptography-46.0.1-cp311-abi3-win_amd64.whl", hash = "sha256:92e8cfe8bd7dd86eac0a677499894862cd5cc2fd74de917daa881d00871ac8e7", size = 3523893, upload-time = "2025-09-17T00:09:06.272Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/dd/eea390f3e78432bc3d2f53952375f8b37cb4d37783e626faa6a51e751719/cryptography-46.0.1-cp311-abi3-win_arm64.whl", hash = "sha256:db5597a4c7353b2e5fb05a8e6cb74b56a4658a2b7bf3cb6b1821ae7e7fd6eaa0", size = 2932145, upload-time = "2025-09-17T00:09:08.568Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0a/fb/c73588561afcd5e24b089952bd210b14676c0c5bf1213376350ae111945c/cryptography-46.0.1-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:4c49eda9a23019e11d32a0eb51a27b3e7ddedde91e099c0ac6373e3aacc0d2ee", size = 7193928, upload-time = "2025-09-17T00:09:10.595Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/26/34/0ff0bb2d2c79f25a2a63109f3b76b9108a906dd2a2eb5c1d460b9938adbb/cryptography-46.0.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9babb7818fdd71394e576cf26c5452df77a355eac1a27ddfa24096665a27f8fd", size = 4293515, upload-time = "2025-09-17T00:09:12.861Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/b7/d4f848aee24ecd1be01db6c42c4a270069a4f02a105d9c57e143daf6cf0f/cryptography-46.0.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9f2c4cc63be3ef43c0221861177cee5d14b505cd4d4599a89e2cd273c4d3542a", size = 4545619, upload-time = "2025-09-17T00:09:15.397Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/44/a5/42fedefc754fd1901e2d95a69815ea4ec8a9eed31f4c4361fcab80288661/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:41c281a74df173876da1dc9a9b6953d387f06e3d3ed9284e3baae3ab3f40883a", size = 4299160, upload-time = "2025-09-17T00:09:17.155Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/a1/cd21174f56e769c831fbbd6399a1b7519b0ff6280acec1b826d7b072640c/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0a17377fa52563d730248ba1f68185461fff36e8bc75d8787a7dd2e20a802b7a", size = 3994491, upload-time = "2025-09-17T00:09:18.971Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8d/2f/a8cbfa1c029987ddc746fd966711d4fa71efc891d37fbe9f030fe5ab4eec/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:0d1922d9280e08cde90b518a10cd66831f632960a8d08cb3418922d83fce6f12", size = 4960157, upload-time = "2025-09-17T00:09:20.923Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/ae/63a84e6789e0d5a2502edf06b552bcb0fa9ff16147265d5c44a211942abe/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:af84e8e99f1a82cea149e253014ea9dc89f75b82c87bb6c7242203186f465129", size = 4577263, upload-time = "2025-09-17T00:09:23.356Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/8f/1b9fa8e92bd9cbcb3b7e1e593a5232f2c1e6f9bd72b919c1a6b37d315f92/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:ef648d2c690703501714588b2ba640facd50fd16548133b11b2859e8655a69da", size = 4298703, upload-time = "2025-09-17T00:09:25.566Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c3/af/bb95db070e73fea3fae31d8a69ac1463d89d1c084220f549b00dd01094a8/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:e94eb5fa32a8a9f9bf991f424f002913e3dd7c699ef552db9b14ba6a76a6313b", size = 4926363, upload-time = "2025-09-17T00:09:27.451Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/3b/d8fb17ffeb3a83157a1cc0aa5c60691d062aceecba09c2e5e77ebfc1870c/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:534b96c0831855e29fc3b069b085fd185aa5353033631a585d5cd4dd5d40d657", size = 4576958, upload-time = "2025-09-17T00:09:29.924Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/46/86bc3a05c10c8aa88c8ae7e953a8b4e407c57823ed201dbcba55c4d655f4/cryptography-46.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9b55038b5c6c47559aa33626d8ecd092f354e23de3c6975e4bb205df128a2a0", size = 4422507, upload-time = "2025-09-17T00:09:32.222Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/4e/387e5a21dfd2b4198e74968a541cfd6128f66f8ec94ed971776e15091ac3/cryptography-46.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ec13b7105117dbc9afd023300fb9954d72ca855c274fe563e72428ece10191c0", size = 4683964, upload-time = "2025-09-17T00:09:34.118Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/a3/f9f5907b166adb8f26762071474b38bbfcf89858a5282f032899075a38a1/cryptography-46.0.1-cp314-cp314t-win32.whl", hash = "sha256:504e464944f2c003a0785b81668fe23c06f3b037e9cb9f68a7c672246319f277", size = 3029705, upload-time = "2025-09-17T00:09:36.381Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/66/4d3a4f1850db2e71c2b1628d14b70b5e4c1684a1bd462f7fffb93c041c38/cryptography-46.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:c52fded6383f7e20eaf70a60aeddd796b3677c3ad2922c801be330db62778e05", size = 3502175, upload-time = "2025-09-17T00:09:38.261Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/52/c7/9f10ad91435ef7d0d99a0b93c4360bea3df18050ff5b9038c489c31ac2f5/cryptography-46.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:9495d78f52c804b5ec8878b5b8c7873aa8e63db9cd9ee387ff2db3fffe4df784", size = 2912354, upload-time = "2025-09-17T00:09:40.078Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/e5/fbd632385542a3311915976f88e0dfcf09e62a3fc0aff86fb6762162a24d/cryptography-46.0.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:d84c40bdb8674c29fa192373498b6cb1e84f882889d21a471b45d1f868d8d44b", size = 7255677, upload-time = "2025-09-17T00:09:42.407Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/3e/13ce6eab9ad6eba1b15a7bd476f005a4c1b3f299f4c2f32b22408b0edccf/cryptography-46.0.1-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9ed64e5083fa806709e74fc5ea067dfef9090e5b7a2320a49be3c9df3583a2d8", size = 4301110, upload-time = "2025-09-17T00:09:45.614Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a2/67/65dc233c1ddd688073cf7b136b06ff4b84bf517ba5529607c9d79720fc67/cryptography-46.0.1-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:341fb7a26bc9d6093c1b124b9f13acc283d2d51da440b98b55ab3f79f2522ead", size = 4562369, upload-time = "2025-09-17T00:09:47.601Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/db/d64ae4c6f4e98c3dac5bf35dd4d103f4c7c345703e43560113e5e8e31b2b/cryptography-46.0.1-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6ef1488967e729948d424d09c94753d0167ce59afba8d0f6c07a22b629c557b2", size = 4302126, upload-time = "2025-09-17T00:09:49.335Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/19/5f1eea17d4805ebdc2e685b7b02800c4f63f3dd46cfa8d4c18373fea46c8/cryptography-46.0.1-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7823bc7cdf0b747ecfb096d004cc41573c2f5c7e3a29861603a2871b43d3ef32", size = 4009431, upload-time = "2025-09-17T00:09:51.239Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/81/b5/229ba6088fe7abccbfe4c5edb96c7a5ad547fac5fdd0d40aa6ea540b2985/cryptography-46.0.1-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:f736ab8036796f5a119ff8211deda416f8c15ce03776db704a7a4e17381cb2ef", size = 4980739, upload-time = "2025-09-17T00:09:54.181Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3a/9c/50aa38907b201e74bc43c572f9603fa82b58e831bd13c245613a23cff736/cryptography-46.0.1-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:e46710a240a41d594953012213ea8ca398cd2448fbc5d0f1be8160b5511104a0", size = 4592289, upload-time = "2025-09-17T00:09:56.731Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/33/229858f8a5bb22f82468bb285e9f4c44a31978d5f5830bb4ea1cf8a4e454/cryptography-46.0.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:84ef1f145de5aee82ea2447224dc23f065ff4cc5791bb3b506615957a6ba8128", size = 4301815, upload-time = "2025-09-17T00:09:58.548Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/52/cb/b76b2c87fbd6ed4a231884bea3ce073406ba8e2dae9defad910d33cbf408/cryptography-46.0.1-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9394c7d5a7565ac5f7d9ba38b2617448eba384d7b107b262d63890079fad77ca", size = 4943251, upload-time = "2025-09-17T00:10:00.475Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/94/0f/f66125ecf88e4cb5b8017ff43f3a87ede2d064cb54a1c5893f9da9d65093/cryptography-46.0.1-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:ed957044e368ed295257ae3d212b95456bd9756df490e1ac4538857f67531fcc", size = 4591247, upload-time = "2025-09-17T00:10:02.874Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f6/22/9f3134ae436b63b463cfdf0ff506a0570da6873adb4bf8c19b8a5b4bac64/cryptography-46.0.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f7de12fa0eee6234de9a9ce0ffcfa6ce97361db7a50b09b65c63ac58e5f22fc7", size = 4428534, upload-time = "2025-09-17T00:10:04.994Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/39/e6042bcb2638650b0005c752c38ea830cbfbcbb1830e4d64d530000aa8dc/cryptography-46.0.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7fab1187b6c6b2f11a326f33b036f7168f5b996aedd0c059f9738915e4e8f53a", size = 4699541, upload-time = "2025-09-17T00:10:06.925Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/68/46/753d457492d15458c7b5a653fc9a84a1c9c7a83af6ebdc94c3fc373ca6e8/cryptography-46.0.1-cp38-abi3-win32.whl", hash = "sha256:45f790934ac1018adeba46a0f7289b2b8fe76ba774a88c7f1922213a56c98bc1", size = 3043779, upload-time = "2025-09-17T00:10:08.951Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2f/50/b6f3b540c2f6ee712feeb5fa780bb11fad76634e71334718568e7695cb55/cryptography-46.0.1-cp38-abi3-win_amd64.whl", hash = "sha256:7176a5ab56fac98d706921f6416a05e5aff7df0e4b91516f450f8627cda22af3", size = 3517226, upload-time = "2025-09-17T00:10:10.769Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/e8/77d17d00981cdd27cc493e81e1749a0b8bbfb843780dbd841e30d7f50743/cryptography-46.0.1-cp38-abi3-win_arm64.whl", hash = "sha256:efc9e51c3e595267ff84adf56e9b357db89ab2279d7e375ffcaf8f678606f3d9", size = 2923149, upload-time = "2025-09-17T00:10:13.236Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a4/98/40dfe932134bdcae4f6ab5927c87488754bf9eb79297d7e0070b78dd58e9/cryptography-47.0.0-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:160ad728f128972d362e714054f6ba0067cab7fb350c5202a9ae8ae4ce3ef1a0", size = 7912214, upload-time = "2026-04-24T19:53:03.864Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/34/c6/2733531243fba725f58611b918056b277692f1033373dcc8bd01af1c05d4/cryptography-47.0.0-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b9a8943e359b7615db1a3ba587994618e094ff3d6fa5a390c73d079ce18b3973", size = 4644617, upload-time = "2026-04-24T19:53:06.909Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/e3/b27be1a670a9b87f855d211cf0e1174a5d721216b7616bd52d8581d912ed/cryptography-47.0.0-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f5c15764f261394b22aef6b00252f5195f46f2ca300bec57149474e2538b31f8", size = 4668186, upload-time = "2026-04-24T19:53:09.053Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/81/b9/8443cfe5d17d482d348cee7048acf502bb89a51b6382f06240fd290d4ca3/cryptography-47.0.0-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9c59ab0e0fa3a180a5a9c59f3a5abe3ef90d474bc56d7fadfbe80359491b615b", size = 4651244, upload-time = "2026-04-24T19:53:11.217Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/5e/13ed0cdd0eb88ba159d6dd5ebfece8cb901dbcf1ae5ac4072e28b55d3153/cryptography-47.0.0-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:34b4358b925a5ea3e14384ca781a2c0ef7ac219b57bb9eacc4457078e2b19f92", size = 5252906, upload-time = "2026-04-24T19:53:13.532Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/16/ed058e1df0f33d440217cd120d41d5dda9dd215a80b8187f68483185af82/cryptography-47.0.0-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0024b87d47ae2399165a6bfb20d24888881eeab83ae2566d62467c5ff0030ce7", size = 4701842, upload-time = "2026-04-24T19:53:15.618Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/e0/3d30986b30fdbd9e969abbdf8ba00ed0618615144341faeb57f395a084fe/cryptography-47.0.0-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:1e47422b5557bb82d3fff997e8d92cff4e28b9789576984f08c248d2b3535d93", size = 4289313, upload-time = "2026-04-24T19:53:17.755Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/fd/32db38e3ad0cb331f0691cb4c7a8a6f176f679124dee746b3af6633db4d9/cryptography-47.0.0-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:6f29f36582e6151d9686235e586dd35bb67491f024767d10b842e520dc6a07ac", size = 4650964, upload-time = "2026-04-24T19:53:20.062Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/53/5395d944dfd48cb1f67917f533c609c34347185ef15eb4308024c876f274/cryptography-47.0.0-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:a9b761f012a943b7de0e828843c5688d0de94a0578d44d6c85a1bae32f87791f", size = 5207817, upload-time = "2026-04-24T19:53:22.498Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/34/4f/e5711b28e1901f7d480a2b1b688b645aa4c77c73f10731ed17e7f7db3f0d/cryptography-47.0.0-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4e1de79e047e25d6e9f8cea71c86b4a53aced64134f0f003bbcbf3655fd172c8", size = 4701544, upload-time = "2026-04-24T19:53:24.356Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/22/22/c8ddc25de3010fc8da447648f5a092c40e7a8fadf01dd6d255d9c0b9373d/cryptography-47.0.0-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef6b3634087f18d2155b1e8ce264e5345a753da2c5fa9815e7d41315c90f8318", size = 4783536, upload-time = "2026-04-24T19:53:26.665Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/b6/d4a68f4ea999c6d89e8498579cba1c5fcba4276284de7773b17e4fa69293/cryptography-47.0.0-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:11dbb9f50a0f1bb9757b3d8c27c1101780efb8f0bdecfb12439c22a74d64c001", size = 4926106, upload-time = "2026-04-24T19:53:28.686Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/54/ed/5f524db1fade9c013aa618e1c99c6ed05e8ffc9ceee6cda22fed22dda3f4/cryptography-47.0.0-cp311-abi3-win32.whl", hash = "sha256:7fda2f02c9015db3f42bb8a22324a454516ed10a8c29ca6ece6cdbb5efe2a203", size = 3258581, upload-time = "2026-04-24T19:53:31.058Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/dc/1b901990b174786569029f67542b3edf72ac068b6c3c8683c17e6a2f5363/cryptography-47.0.0-cp311-abi3-win_amd64.whl", hash = "sha256:f5c3296dab66202f1b18a91fa266be93d6aa0c2806ea3d67762c69f60adc71aa", size = 3775309, upload-time = "2026-04-24T19:53:33.054Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/88/7aa18ad9c11bc87689affa5ce4368d884b517502d75739d475fc6f4a03c7/cryptography-47.0.0-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:be12cb6a204f77ed968bcefe68086eb061695b540a3dd05edac507a3111b25f0", size = 7904299, upload-time = "2026-04-24T19:53:35.003Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/07/55/c18f75724544872f234678fdedc871391722cb34a2aee19faa9f63100bb2/cryptography-47.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2ebd84adf0728c039a3be2700289378e1c164afc6748df1a5ed456767bef9ba7", size = 4631180, upload-time = "2026-04-24T19:53:37.517Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/65/31a5cc0eaca99cec5bafffe155d407115d96136bb161e8b49e0ef73f09a7/cryptography-47.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f68d6fbc7fbbcfb0939fea72c3b96a9f9a6edfc0e1b1d29778a2066030418b1", size = 4653529, upload-time = "2026-04-24T19:53:39.775Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/bc/641c0519a495f3bfd0421b48d7cd325c4336578523ccd76ea322b6c29c7a/cryptography-47.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:6651d32eff255423503aa276739da98c30f26c40cbeffcc6048e0d54ef704c0c", size = 4638570, upload-time = "2026-04-24T19:53:42.129Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/f2/300327b0a47f6dc94dd8b71b57052aefe178bb51745073d73d80604f11ab/cryptography-47.0.0-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3fb8fa48075fad7193f2e5496135c6a76ac4b2aa5a38433df0a539296b377829", size = 5238019, upload-time = "2026-04-24T19:53:44.577Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e9/5a/5b5cf994391d4bf9d9c7efd4c66aabe4d95227256627f8fea6cff7dfadbd/cryptography-47.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:11438c7518132d95f354fa01a4aa2f806d172a061a7bed18cf18cbdacdb204d7", size = 4686832, upload-time = "2026-04-24T19:53:47.015Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/2c/ae950e28fd6475c852fc21a44db3e6b5bcc1261d1e370f2b6e42fa800fef/cryptography-47.0.0-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8c1a736bbb3288005796c3f7ccb9453360d7fed483b13b9f468aea5171432923", size = 4269301, upload-time = "2026-04-24T19:53:48.97Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/fb/6a39782e150ffe5cc1b0018cb6ddc48bf7ca62b498d7539ffc8a758e977d/cryptography-47.0.0-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:f1557695e5c2b86e204f6ce9470497848634100787935ab7adc5397c54abd7ab", size = 4638110, upload-time = "2026-04-24T19:53:51.011Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8e/d7/0b3c71090a76e5c203164a47688b697635ece006dcd2499ab3a4dbd3f0bd/cryptography-47.0.0-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:f9a034b642b960767fb343766ae5ba6ad653f2e890ddd82955aef288ffea8736", size = 5194988, upload-time = "2026-04-24T19:53:52.962Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/63/33/63a961498a9df51721ab578c5a2622661411fc520e00bd83b0cc64eb20c4/cryptography-47.0.0-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:b1c76fca783aa7698eb21eb14f9c4aa09452248ee54a627d125025a43f83e7a7", size = 4686563, upload-time = "2026-04-24T19:53:55.274Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/bf/5ee5b145248f92250de86145d1c1d6edebbd57a7fe7caa4dedb5d4cf06a1/cryptography-47.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4f7722c97826770bab8ae92959a2e7b20a5e9e9bf4deae68fd86c3ca457bab52", size = 4770094, upload-time = "2026-04-24T19:53:57.753Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/92/43/21d220b2da5d517773894dacdcdb5c682c28d3fffce65548cb06e87d5501/cryptography-47.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:09f6d7bf6724f8db8b32f11eccf23efc8e759924bc5603800335cf8859a3ddbd", size = 4913811, upload-time = "2026-04-24T19:54:00.236Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/98/dc4ad376ac5f1a1a7d4a83f7b0c6f2bcad36b5d2d8f30aeb482d3a7d9582/cryptography-47.0.0-cp314-cp314t-win32.whl", hash = "sha256:6eebcaf0df1d21ce1f90605c9b432dd2c4f4ab665ac29a40d5e3fc68f51b5e63", size = 3237158, upload-time = "2026-04-24T19:54:02.606Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/da/97f62d18306b5133468bc3f8cc73a3111e8cdc8cf8d3e69474d6e5fd2d1b/cryptography-47.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:51c9313e90bd1690ec5a75ed047c27c0b8e6c570029712943d6116ef9a90620b", size = 3758706, upload-time = "2026-04-24T19:54:04.433Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e0/34/a4fae8ae7c3bc227460c9ae43f56abf1b911da0ec29e0ebac53bb0a4b6b7/cryptography-47.0.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:14432c8a9bcb37009784f9594a62fae211a2ae9543e96c92b2a8e4c3cd5cd0c4", size = 7904072, upload-time = "2026-04-24T19:54:06.411Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/64/d7b1e54fdb69f22d24a64bb3e88dc718b31c7fb10ef0b9691a3cf7eeea6e/cryptography-47.0.0-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:07efe86201817e7d3c18781ca9770bc0db04e1e48c994be384e4602bc38f8f27", size = 4635767, upload-time = "2026-04-24T19:54:08.519Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/7b/cca826391fb2a94efdcdfe4631eb69306ee1cff0b22f664a412c90713877/cryptography-47.0.0-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2b45761c6ec22b7c726d6a829558777e32d0f1c8be7c3f3480f9c912d5ee8a10", size = 4654350, upload-time = "2026-04-24T19:54:10.795Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4c/65/4b57bcc823f42a991627c51c2f68c9fd6eb1393c1756aac876cba2accae2/cryptography-47.0.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:edd4da498015da5b9f26d38d3bfc2e90257bfa9cbed1f6767c282a0025ae649b", size = 4643394, upload-time = "2026-04-24T19:54:13.275Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/c4/2c5fbeea70adbbca2bbae865e1d605d6a4a7f8dbd9d33eaf69645087f06c/cryptography-47.0.0-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:9af828c0d5a65c70ec729cd7495a4bf1a67ecb66417b8f02ff125ab8a6326a74", size = 5225777, upload-time = "2026-04-24T19:54:15.18Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/b8/ac57107ef32749d2b244e36069bb688792a363aaaa3acc9e3cf84c130315/cryptography-47.0.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:256d07c78a04d6b276f5df935a9923275f53bd1522f214447fdf365494e2d515", size = 4688771, upload-time = "2026-04-24T19:54:17.835Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/fc/9f1de22ff8be99d991f240a46863c52d475404c408886c5a38d2b5c3bb26/cryptography-47.0.0-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:5d0e362ff51041b0c0d219cc7d6924d7b8996f57ce5712bdcef71eb3c65a59cc", size = 4270753, upload-time = "2026-04-24T19:54:19.963Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/68/d70c852797aa68e8e48d12e5a87170c43f67bb4a59403627259dd57d15de/cryptography-47.0.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:1581aef4219f7ca2849d0250edaa3866212fb74bf5667284f46aa92f9e65c1ca", size = 4642911, upload-time = "2026-04-24T19:54:21.818Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/51/661cbee74f594c5d97ff82d34f10d5551c085ca4668645f4606ebd22bd5d/cryptography-47.0.0-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:a49a3eb5341b9503fa3000a9a0db033161db90d47285291f53c2a9d2cd1b7f76", size = 5181411, upload-time = "2026-04-24T19:54:24.376Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/94/87/f2b6c374a82cf076cfa1416992ac8e8ec94d79facc37aec87c1a5cb72352/cryptography-47.0.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:2207a498b03275d0051589e326b79d4cf59985c99031b05bb292ac52631c37fe", size = 4688262, upload-time = "2026-04-24T19:54:26.946Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/e2/8b7462f4acf21ec509616f0245018bb197194ab0b65c2ea21a0bdd53c0eb/cryptography-47.0.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:7a02675e2fabd0c0fc04c868b8781863cbf1967691543c22f5470500ff840b31", size = 4775506, upload-time = "2026-04-24T19:54:28.926Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/75/158e494e4c08dc05e039da5bb48553826bd26c23930cf8d3cd5f21fa8921/cryptography-47.0.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80887c5cbd1774683cb126f0ab4184567f080071d5acf62205acb354b4b753b7", size = 4912060, upload-time = "2026-04-24T19:54:30.869Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/bd/0a9d3edbf5eadbac926d7b9b3cd0c4be584eeeae4a003d24d9eda4affbbd/cryptography-47.0.0-cp38-abi3-win32.whl", hash = "sha256:ed67ea4e0cfb5faa5bc7ecb6e2b8838f3807a03758eec239d6c21c8769355310", size = 3248487, upload-time = "2026-04-24T19:54:33.494Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/80/5681af756d0da3a599b7bdb586fac5a1540f1bcefd2717a20e611ddade45/cryptography-47.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:835d2d7f47cdc53b3224e90810fb1d36ca94ea29cc1801fb4c1bc43876735769", size = 3755737, upload-time = "2026-04-24T19:54:35.408Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -929,11 +926,11 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "python-multipart"
|
||||
version = "0.0.20"
|
||||
version = "0.0.27"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/69/9b/f23807317a113dc36e74e75eb265a02dd1a4d9082abc3c1064acd22997c4/python_multipart-0.0.27.tar.gz", hash = "sha256:9870a6a8c5a20a5bf4f07c017bd1489006ff8836cff097b6933355ee2b49b602", size = 44043, upload-time = "2026-04-27T10:51:26.649Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/99/78/4126abcbdbd3c559d43e0db7f7b9173fc6befe45d39a2856cc0b8ec2a5a6/python_multipart-0.0.27-py3-none-any.whl", hash = "sha256:6fccfad17a27334bd0193681b369f476eda3409f17381a2d65aa7df3f7275645", size = 29254, upload-time = "2026-04-27T10:51:24.997Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
Generated: +60 -60
@@ -1,4 +1,4 @@
|
||||
# This file is automatically @generated by Poetry 2.3.4 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 2.3.2 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "about-time"
|
||||
@@ -2029,61 +2029,61 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
|
||||
|
||||
[[package]]
|
||||
name = "cryptography"
|
||||
version = "46.0.6"
|
||||
version = "46.0.7"
|
||||
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||
optional = false
|
||||
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
|
||||
groups = ["main", "dev"]
|
||||
files = [
|
||||
{file = "cryptography-46.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:64235194bad039a10bb6d2d930ab3323baaec67e2ce36215fd0952fad0930ca8"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:26031f1e5ca62fcb9d1fcb34b2b60b390d1aacaa15dc8b895a9ed00968b97b30"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9a693028b9cbe51b5a1136232ee8f2bc242e4e19d456ded3fa7c86e43c713b4a"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:67177e8a9f421aa2d3a170c3e56eca4e0128883cf52a071a7cbf53297f18b175"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:d9528b535a6c4f8ff37847144b8986a9a143585f0540fbcb1a98115b543aa463"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:22259338084d6ae497a19bae5d4c66b7ca1387d3264d1c2c0e72d9e9b6a77b97"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:760997a4b950ff00d418398ad73fbc91aa2894b5c1db7ccb45b4f68b42a63b3c"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3dfa6567f2e9e4c5dceb8ccb5a708158a2a871052fa75c8b78cb0977063f1507"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:cdcd3edcbc5d55757e5f5f3d330dd00007ae463a7e7aa5bf132d1f22a4b62b19"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:d4e4aadb7fc1f88687f47ca20bb7227981b03afaae69287029da08096853b738"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2b417edbe8877cda9022dde3a008e2deb50be9c407eef034aeeb3a8b11d9db3c"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:380343e0653b1c9d7e1f55b52aaa2dbb2fdf2730088d48c43ca1c7c0abb7cc2f"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-win32.whl", hash = "sha256:bcb87663e1f7b075e48c3be3ecb5f0b46c8fc50b50a97cf264e7f60242dca3f2"},
|
||||
{file = "cryptography-46.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:6739d56300662c468fddb0e5e291f9b4d084bead381667b9e654c7dd81705124"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:2ef9e69886cbb137c2aef9772c2e7138dc581fad4fcbcf13cc181eb5a3ab6275"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7f417f034f91dcec1cb6c5c35b07cdbb2ef262557f701b4ecd803ee8cefed4f4"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d24c13369e856b94892a89ddf70b332e0b70ad4a5c43cf3e9cb71d6d7ffa1f7b"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:aad75154a7ac9039936d50cf431719a2f8d4ed3d3c277ac03f3339ded1a5e707"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3c21d92ed15e9cfc6eb64c1f5a0326db22ca9c2566ca46d845119b45b4400361"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:4668298aef7cddeaf5c6ecc244c2302a2b8e40f384255505c22875eebb47888b"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8ce35b77aaf02f3b59c90b2c8a05c73bac12cea5b4e8f3fbece1f5fddea5f0ca"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c89eb37fae9216985d8734c1afd172ba4927f5a05cfd9bf0e4863c6d5465b013"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:ed418c37d095aeddf5336898a132fba01091f0ac5844e3e8018506f014b6d2c4"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:69cf0056d6947edc6e6760e5f17afe4bea06b56a9ac8a06de9d2bd6b532d4f3a"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e7304c4f4e9490e11efe56af6713983460ee0780f16c63f219984dab3af9d2d"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b928a3ca837c77a10e81a814a693f2295200adb3352395fad024559b7be7a736"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-win32.whl", hash = "sha256:97c8115b27e19e592a05c45d0dd89c57f81f841cc9880e353e0d3bf25b2139ed"},
|
||||
{file = "cryptography-46.0.6-cp314-cp314t-win_amd64.whl", hash = "sha256:c797e2517cb7880f8297e2c0f43bb910e91381339336f75d2c1c2cbf811b70b4"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:12cae594e9473bca1a7aceb90536060643128bb274fcea0fc459ab90f7d1ae7a"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:639301950939d844a9e1c4464d7e07f902fe9a7f6b215bb0d4f28584729935d8"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ed3775295fb91f70b4027aeba878d79b3e55c0b3e97eaa4de71f8f23a9f2eb77"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8927ccfbe967c7df312ade694f987e7e9e22b2425976ddbf28271d7e58845290"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:b12c6b1e1651e42ab5de8b1e00dc3b6354fdfd778e7fa60541ddacc27cd21410"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:063b67749f338ca9c5a0b7fe438a52c25f9526b851e24e6c9310e7195aad3b4d"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:02fad249cb0e090b574e30b276a3da6a149e04ee2f049725b1f69e7b8351ec70"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7e6142674f2a9291463e5e150090b95a8519b2fb6e6aaec8917dd8d094ce750d"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:456b3215172aeefb9284550b162801d62f5f264a081049a3e94307fe20792cfa"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:341359d6c9e68834e204ceaf25936dffeafea3829ab80e9503860dcc4f4dac58"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a9c42a2723999a710445bc0d974e345c32adfd8d2fac6d8a251fa829ad31cfb"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6617f67b1606dfd9fe4dbfa354a9508d4a6d37afe30306fe6c101b7ce3274b72"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-win32.whl", hash = "sha256:7f6690b6c55e9c5332c0b59b9c8a3fb232ebf059094c17f9019a51e9827df91c"},
|
||||
{file = "cryptography-46.0.6-cp38-abi3-win_amd64.whl", hash = "sha256:79e865c642cfc5c0b3eb12af83c35c5aeff4fa5c672dc28c43721c2c9fdd2f0f"},
|
||||
{file = "cryptography-46.0.6-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:2ea0f37e9a9cf0df2952893ad145fd9627d326a59daec9b0802480fa3bcd2ead"},
|
||||
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a3e84d5ec9ba01f8fd03802b2147ba77f0c8f2617b2aff254cedd551844209c8"},
|
||||
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:12f0fa16cc247b13c43d56d7b35287ff1569b5b1f4c5e87e92cc4fcc00cd10c0"},
|
||||
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:50575a76e2951fe7dbd1f56d181f8c5ceeeb075e9ff88e7ad997d2f42af06e7b"},
|
||||
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:90e5f0a7b3be5f40c3a0a0eafb32c681d8d2c181fc2a1bdabe9b3f611d9f6b1a"},
|
||||
{file = "cryptography-46.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6728c49e3b2c180ef26f8e9f0a883a2c585638db64cf265b49c9ba10652d430e"},
|
||||
{file = "cryptography-46.0.6.tar.gz", hash = "sha256:27550628a518c5c6c903d84f637fbecf287f6cb9ced3804838a1295dc1fd0759"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:ea42cbe97209df307fdc3b155f1b6fa2577c0defa8f1f7d3be7d31d189108ad4"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b36a4695e29fe69215d75960b22577197aca3f7a25b9cf9d165dcfe9d80bc325"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ad9ef796328c5e3c4ceed237a183f5d41d21150f972455a9d926593a1dcb308"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:73510b83623e080a2c35c62c15298096e2a5dc8d51c3b4e1740211839d0dea77"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:cbd5fb06b62bd0721e1170273d3f4d5a277044c47ca27ee257025146c34cbdd1"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:420b1e4109cc95f0e5700eed79908cef9268265c773d3a66f7af1eef53d409ef"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:24402210aa54baae71d99441d15bb5a1919c195398a87b563df84468160a65de"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8a469028a86f12eb7d2fe97162d0634026d92a21f3ae0ac87ed1c4a447886c83"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9694078c5d44c157ef3162e3bf3946510b857df5a3955458381d1c7cfc143ddb"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:42a1e5f98abb6391717978baf9f90dc28a743b7d9be7f0751a6f56a75d14065b"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91bbcb08347344f810cbe49065914fe048949648f6bd5c2519f34619142bbe85"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5d1c02a14ceb9148cc7816249f64f623fbfee39e8c03b3650d842ad3f34d637e"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-win32.whl", hash = "sha256:d23c8ca48e44ee015cd0a54aeccdf9f09004eba9fc96f38c911011d9ff1bd457"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:397655da831414d165029da9bc483bed2fe0e75dde6a1523ec2fe63f3c46046b"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:d151173275e1728cf7839aaa80c34fe550c04ddb27b34f48c232193df8db5842"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:db0f493b9181c7820c8134437eb8b0b4792085d37dbb24da050476ccb664e59c"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ebd6daf519b9f189f85c479427bbd6e9c9037862cf8fe89ee35503bd209ed902"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:b7b412817be92117ec5ed95f880defe9cf18a832e8cafacf0a22337dc1981b4d"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:fbfd0e5f273877695cb93baf14b185f4878128b250cc9f8e617ea0c025dfb022"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:ffca7aa1d00cf7d6469b988c581598f2259e46215e0140af408966a24cf086ce"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:60627cf07e0d9274338521205899337c5d18249db56865f943cbe753aa96f40f"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:80406c3065e2c55d7f49a9550fe0c49b3f12e5bfff5dedb727e319e1afb9bf99"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:c5b1ccd1239f48b7151a65bc6dd54bcfcc15e028c8ac126d3fada09db0e07ef1"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:d5f7520159cd9c2154eb61eb67548ca05c5774d39e9c2c4339fd793fe7d097b2"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:fcd8eac50d9138c1d7fc53a653ba60a2bee81a505f9f8850b6b2888555a45d0e"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:65814c60f8cc400c63131584e3e1fad01235edba2614b61fbfbfa954082db0ee"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-win32.whl", hash = "sha256:fdd1736fed309b4300346f88f74cd120c27c56852c3838cab416e7a166f67298"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-win_amd64.whl", hash = "sha256:e06acf3c99be55aa3b516397fe42f5855597f430add9c17fa46bf2e0fb34c9bb"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:462ad5cb1c148a22b2e3bcc5ad52504dff325d17daf5df8d88c17dda1f75f2a4"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:84d4cced91f0f159a7ddacad249cc077e63195c36aac40b4150e7a57e84fffe7"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:128c5edfe5e5938b86b03941e94fac9ee793a94452ad1365c9fc3f4f62216832"},
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5e51be372b26ef4ba3de3c167cd3d1022934bc838ae9eaad7e644986d2a3d163"},
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:cdf1a610ef82abb396451862739e3fc93b071c844399e15b90726ef7470eeaf2"},
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1d25aee46d0c6f1a501adcddb2d2fee4b979381346a78558ed13e50aa8a59067"},
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:cdfbe22376065ffcf8be74dc9a909f032df19bc58a699456a21712d6e5eabfd0"},
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:abad9dac36cbf55de6eb49badd4016806b3165d396f64925bf2999bcb67837ba"},
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:935ce7e3cfdb53e3536119a542b839bb94ec1ad081013e9ab9b7cfd478b05006"},
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:35719dc79d4730d30f1c2b6474bd6acda36ae2dfae1e3c16f2051f215df33ce0"},
{file = "cryptography-46.0.7-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:7bbc6ccf49d05ac8f7d7b5e2e2c33830d4fe2061def88210a126d130d7f71a85"},
{file = "cryptography-46.0.7-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a1529d614f44b863a7b480c6d000fe93b59acee9c82ffa027cfadc77521a9f5e"},
{file = "cryptography-46.0.7-cp38-abi3-win32.whl", hash = "sha256:f247c8c1a1fb45e12586afbb436ef21ff1e80670b2861a90353d9b025583d246"},
{file = "cryptography-46.0.7-cp38-abi3-win_amd64.whl", hash = "sha256:506c4ff91eff4f82bdac7633318a526b1d1309fc07ca76a3ad182cb5b686d6d3"},
{file = "cryptography-46.0.7-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:fc9ab8856ae6cf7c9358430e49b368f3108f050031442eaeb6b9d87e4dcf4e4f"},
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d3b99c535a9de0adced13d159c5a9cf65c325601aa30f4be08afd680643e9c15"},
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d02c738dacda7dc2a74d1b2b3177042009d5cab7c7079db74afc19e56ca1b455"},
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:04959522f938493042d595a736e7dbdff6eb6cc2339c11465b3ff89343b65f65"},
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:3986ac1dee6def53797289999eabe84798ad7817f3e97779b5061a95b0ee4968"},
{file = "cryptography-46.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:258514877e15963bd43b558917bc9f54cf7cf866c38aa576ebf47a77ddbc43a4"},
{file = "cryptography-46.0.7.tar.gz", hash = "sha256:e4cfd68c5f3e0bfdad0d38e023239b96a2fe84146481852dffbcca442c245aa5"},
]

[package.dependencies]
@@ -2097,7 +2097,7 @@ nox = ["nox[uv] (>=2024.4.15)"]
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.6)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.7)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"]

[[package]]
@@ -5445,25 +5445,25 @@ files = [

[[package]]
name = "requests"
version = "2.32.4"
version = "2.33.1"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
python-versions = ">=3.10"
groups = ["main", "dev"]
files = [
{file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"},
{file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"},
{file = "requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a"},
{file = "requests-2.33.1.tar.gz", hash = "sha256:18817f8c57c6263968bc123d237e3b8b08ac046f5456bd1e307ee8f4250d3517"},
]

[package.dependencies]
certifi = ">=2017.4.17"
certifi = ">=2023.5.7"
charset_normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
urllib3 = ">=1.26,<3"

[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<8)"]

[[package]]
name = "requests-file"
@@ -6735,4 +6735,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.13"
content-hash = "09ce4507a464b318702ed8c6a738f3bb1bc4cc6ff5a50a9c2884f560af9ab034"
content-hash = "d7e2ad41783a864bb845f63ccc10c88ae1e4ac36d61993ea106bbb4a5f58a843"

@@ -2,6 +2,68 @@

All notable changes to the **Prowler SDK** are documented in this file.

## [5.26.0] (Prowler UNRELEASED)

### 🚀 Added

- `bedrock_guardrails_configured` check for AWS provider [(#10844)](https://github.com/prowler-cloud/prowler/pull/10844)
- Universal compliance pipeline integrated into the CLI: `--list-compliance` and `--list-compliance-requirements` show universal frameworks, and CSV plus OCSF outputs are generated for any framework declaring a `TableConfig` [(#10301)](https://github.com/prowler-cloud/prowler/pull/10301)
- ASD Essential Eight Maturity Model compliance framework for AWS (Maturity Level One, Nov 2023) [(#10808)](https://github.com/prowler-cloud/prowler/pull/10808)
- Update Vercel checks to return a personalized finding status extended depending on the billing plan and classify them with billing-plan categories [(#10663)](https://github.com/prowler-cloud/prowler/pull/10663)
- `bedrock_prompt_management_exists` check for AWS provider [(#10878)](https://github.com/prowler-cloud/prowler/pull/10878)
- 8 Gmail attachment safety and spoofing protection checks for Google Workspace provider using the Cloud Identity Policy API [(#10980)](https://github.com/prowler-cloud/prowler/pull/10980)

### 🔄 Changed

- Azure Network Watcher flow log checks now require workspace-backed Traffic Analytics for `network_flow_log_captured_sent` and align metadata with VNet-compatible flow log guidance [(#10645)](https://github.com/prowler-cloud/prowler/pull/10645)
- Azure compliance entries for legacy Network Watcher flow log controls now use retirement-aware guidance and point new deployments to VNet flow logs [(#10937)](https://github.com/prowler-cloud/prowler/pull/10937)
- AWS CodeBuild service now batches `BatchGetProjects` and `BatchGetBuilds` calls per region (up to 100 items per call) to reduce API call volume and prevent throttling-induced false positives in `codebuild_project_not_publicly_accessible` [(#10639)](https://github.com/prowler-cloud/prowler/pull/10639)
- `display_compliance_table` dispatch switched from substring `in` checks to `startswith` to prevent false matches between similarly named frameworks (e.g. `cisa` vs `cis`); see the sketch after this list [(#10301)](https://github.com/prowler-cloud/prowler/pull/10301)
- Restore the `ec2-imdsv1` category for EC2 IMDS checks to keep Attack Surface and findings filters aligned [(#10998)](https://github.com/prowler-cloud/prowler/pull/10998)
- Container image CVE findings and IaC findings now use official CVE, Prowler Hub, or GitHub Security Advisory URLs instead of Aqua advisory URLs in remediation and references; Trivy rule IDs map to Prowler Hub without the `AVD-` prefix so links resolve [(#10853)](https://github.com/prowler-cloud/prowler/pull/10853)
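A minimal sketch of the dispatch change called out above (framework names are illustrative): a substring check routes `cisa*` framework names to the `cis` handler, while a prefix check does not.

```python
frameworks = ["cis_4.0_aws", "cisa_scuba_googleworkspace"]

# Old dispatch: substring membership matches both names.
print([name for name in frameworks if "cis" in name])
# -> ['cis_4.0_aws', 'cisa_scuba_googleworkspace']

# New dispatch: prefix matching selects only the CIS framework.
print([name for name in frameworks if name.startswith("cis_")])
# -> ['cis_4.0_aws']
```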
### 🐞 Fixed

- AWS SDK test isolation: autouse `mock_aws` fixture and leak detector in `conftest.py` to prevent tests from hitting real AWS endpoints, with idempotent organization setup for tests calling `set_mocked_aws_provider` multiple times [(#10605)](https://github.com/prowler-cloud/prowler/pull/10605)
- AWS `boto` user agent extra is now applied to every client [(#10944)](https://github.com/prowler-cloud/prowler/pull/10944)
- Image provider connection check no longer fails with a misleading `host='https'` resolution error when the registry URL includes an `http://` or `https://` scheme prefix [(#10950)](https://github.com/prowler-cloud/prowler/pull/10950)
- Azure subscriptions sharing the same display name are no longer collapsed into a single identity entry, so every subscription is scanned [(#10718)](https://github.com/prowler-cloud/prowler/pull/10718)

### 🔐 Security

- Parser-mismatch SSRF in image provider registry auth, where crafted bearer-token realms and pagination links could force requests to internal addresses and leak credentials cross-origin; see the sketch after this list [(#10945)](https://github.com/prowler-cloud/prowler/pull/10945)
- `cryptography` from 46.0.6 to 46.0.7 and `trivy` binary from 0.69.2 to 0.70.0 in the SDK image for CVE-2026-39892 and CVE-2026-33186 [(#10978)](https://github.com/prowler-cloud/prowler/pull/10978)
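The SSRF class fixed above follows a common pattern: a server-supplied URL (a bearer-token realm or pagination link) is parsed differently, or less strictly, than the code that later issues the request. A minimal sketch of the usual mitigation, not Prowler's actual implementation, is to enforce same-origin before following any server-supplied URL:

```python
from urllib.parse import urlparse

def is_same_origin(configured_registry: str, server_supplied: str) -> bool:
    """Only follow server-supplied URLs that share scheme, host, and port."""
    a, b = urlparse(configured_registry), urlparse(server_supplied)
    return (a.scheme, a.hostname, a.port) == (b.scheme, b.hostname, b.port)

print(is_same_origin("https://registry.example.com", "https://registry.example.com/token"))  # True
print(is_same_origin("https://registry.example.com", "http://169.254.169.254/latest"))       # False
```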
---

## [5.25.3] (Prowler v5.25.3)

### 🐞 Fixed

- Oracle Cloud identity scans known or supplied regions to better support non-Ashburn tenancies [(#10529)](https://github.com/prowler-cloud/prowler/pull/10529)

---

## [5.25.2] (Prowler v5.25.2)

### 🐞 Fixed

- `route53_dangling_ip_subdomain_takeover` now also flags `CNAME` records pointing to S3 website endpoints whose buckets are missing from the account [(#10920)](https://github.com/prowler-cloud/prowler/pull/10920)
- Duplicate Kubernetes RBAC findings when the same User or Group subject appeared in multiple ClusterRoleBindings [(#10242)](https://github.com/prowler-cloud/prowler/pull/10242)
- Match K8s RBAC rules by `apiGroup` [(#10969)](https://github.com/prowler-cloud/prowler/pull/10969)
- Return a compact actor name from CloudTrail `userIdentity` events [(#10986)](https://github.com/prowler-cloud/prowler/pull/10986)

---

## [5.25.1] (Prowler v5.25.1)

### 🐞 Fixed

- `KeyError` when generating compliance outputs after the CLI scan [(#10919)](https://github.com/prowler-cloud/prowler/pull/10919)
- Kubernetes OCSF `provider_uid` now uses the cluster name in in-cluster mode (so `--cluster-name` is correctly reflected in findings) and keeps the kubeconfig context in kubeconfig mode [(#10483)](https://github.com/prowler-cloud/prowler/pull/10483)

---

## [5.25.0] (Prowler v5.25.0)

### 🚀 Added
@@ -45,7 +45,10 @@ from prowler.lib.check.check import (
)
from prowler.lib.check.checks_loader import load_checks_to_execute
from prowler.lib.check.compliance import update_checks_metadata_with_compliance
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.check.compliance_models import (
    Compliance,
    get_bulk_compliance_frameworks_universal,
)
from prowler.lib.check.custom_checks_metadata import (
    parse_custom_checks_metadata_file,
    update_checks_metadata,
@@ -54,6 +57,9 @@ from prowler.lib.check.models import CheckMetadata
from prowler.lib.cli.parser import ProwlerArgumentParser
from prowler.lib.logger import logger, set_logging_config
from prowler.lib.outputs.asff.asff import ASFF
from prowler.lib.outputs.compliance.asd_essential_eight.asd_essential_eight_aws import (
    ASDEssentialEightAWS,
)
from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected import (
    AWSWellArchitected,
)
@@ -75,7 +81,10 @@ from prowler.lib.outputs.compliance.cis.cis_oraclecloud import OracleCloudCIS
from prowler.lib.outputs.compliance.cisa_scuba.cisa_scuba_googleworkspace import (
    GoogleWorkspaceCISASCuBA,
)
from prowler.lib.outputs.compliance.compliance import display_compliance_table
from prowler.lib.outputs.compliance.compliance import (
    display_compliance_table,
    process_universal_compliance_frameworks,
)
from prowler.lib.outputs.compliance.csa.csa_alibabacloud import AlibabaCloudCSA
from prowler.lib.outputs.compliance.csa.csa_aws import AWSCSA
from prowler.lib.outputs.compliance.csa.csa_azure import AzureCSA
@@ -235,6 +244,8 @@ def prowler():
    # Load compliance frameworks
    logger.debug("Loading compliance frameworks from .json files")

    universal_frameworks = {}

    # Skip compliance frameworks for external-tool providers
    if provider not in EXTERNAL_TOOL_PROVIDERS:
        bulk_compliance_frameworks = Compliance.get_bulk(provider)
@@ -242,6 +253,8 @@ def prowler():
        bulk_checks_metadata = update_checks_metadata_with_compliance(
            bulk_compliance_frameworks, bulk_checks_metadata
        )
        # Load universal compliance frameworks for new rendering pipeline
        universal_frameworks = get_bulk_compliance_frameworks_universal(provider)

    # Update checks metadata if the --custom-checks-metadata-file is present
    custom_checks_metadata = None
@@ -254,12 +267,12 @@ def prowler():
    )

    if args.list_compliance:
        print_compliance_frameworks(bulk_compliance_frameworks)
        all_frameworks = {**bulk_compliance_frameworks, **universal_frameworks}
        print_compliance_frameworks(all_frameworks)
        sys.exit()
    if args.list_compliance_requirements:
        print_compliance_requirements(
            bulk_compliance_frameworks, args.list_compliance_requirements
        )
        all_frameworks = {**bulk_compliance_frameworks, **universal_frameworks}
        print_compliance_requirements(all_frameworks, args.list_compliance_requirements)
        sys.exit()

    # Load checks to execute
@@ -276,6 +289,7 @@ def prowler():
        provider=provider,
        list_checks=getattr(args, "list_checks", False)
        or getattr(args, "list_checks_json", False),
        universal_frameworks=universal_frameworks,
    )

    # if --list-checks-json, dump a json file and exit
@@ -624,9 +638,29 @@ def prowler():
        )

        # Compliance Frameworks
        # Source the framework listing from the union of `bulk_compliance_frameworks`
        # and `universal_frameworks` so universal-only frameworks (e.g.
        # `prowler/compliance/csa_ccm_4.0.json`) — which `Compliance.get_bulk(provider)`
        # does not load — still reach `process_universal_compliance_frameworks` below.
        # The provider-specific block subtracts the names handled by the universal
        # processor so the legacy per-provider handlers only see frameworks that the
        # bulk loader actually resolved.
        input_compliance_frameworks = set(output_options.output_modes).intersection(
            get_available_compliance_frameworks(provider)
            set(bulk_compliance_frameworks.keys()) | set(universal_frameworks.keys())
        )

        # ── Universal compliance frameworks (provider-agnostic) ──
        universal_processed = process_universal_compliance_frameworks(
            input_compliance_frameworks=input_compliance_frameworks,
            universal_frameworks=universal_frameworks,
            finding_outputs=finding_outputs,
            output_directory=output_options.output_directory,
            output_filename=output_options.output_filename,
            provider=provider,
            generated_outputs=generated_outputs,
        )
        input_compliance_frameworks -= universal_processed

        if provider == "aws":
            for compliance_name in input_compliance_frameworks:
                if compliance_name.startswith("cis_"):
@@ -642,6 +676,18 @@ def prowler():
                    )
                    generated_outputs["compliance"].append(cis)
                    cis.batch_write_data_to_file()
                elif compliance_name.startswith("asd_essential_eight"):
                    filename = (
                        f"{output_options.output_directory}/compliance/"
                        f"{output_options.output_filename}_{compliance_name}.csv"
                    )
                    asd_essential_eight = ASDEssentialEightAWS(
                        findings=finding_outputs,
                        compliance=bulk_compliance_frameworks[compliance_name],
                        file_path=filename,
                    )
                    generated_outputs["compliance"].append(asd_essential_eight)
                    asd_essential_eight.batch_write_data_to_file()
                elif compliance_name == "mitre_attack_aws":
                    # Generate MITRE ATT&CK Finding Object
                    filename = (
@@ -1396,6 +1442,9 @@ def prowler():
            output_options.output_filename,
            output_options.output_directory,
            compliance_overview,
            universal_frameworks=universal_frameworks,
            provider=provider,
            output_formats=args.output_formats,
        )
        if compliance_overview:
            print(

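The set arithmetic in the hunk above is compact; here is a toy illustration (framework names and values are made up) of how the union and the subtraction interact:

```python
output_modes = {"csv", "cis_4.0_aws", "csa_ccm_4.0"}     # requested outputs
bulk_compliance_frameworks = {"cis_4.0_aws": object()}   # provider-specific loader
universal_frameworks = {"csa_ccm_4.0": object()}         # universal loader

# Union of both loaders, intersected with what the user asked for.
input_compliance_frameworks = set(output_modes) & (
    set(bulk_compliance_frameworks.keys()) | set(universal_frameworks.keys())
)
print(input_compliance_frameworks)  # {'cis_4.0_aws', 'csa_ccm_4.0'} (order may vary)

# The universal processor reports what it handled; subtracting it leaves
# only bulk-loaded frameworks for the legacy per-provider handlers.
universal_processed = {"csa_ccm_4.0"}
input_compliance_frameworks -= universal_processed
print(input_compliance_frameworks)  # {'cis_4.0_aws'}
```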
File diff suppressed because it is too large
@@ -6426,9 +6426,9 @@
}
],
"Checks": [
"bedrock_agent_guardrail_enabled",
"bedrock_guardrail_prompt_attack_filter_enabled",
"bedrock_guardrail_sensitive_information_filter_enabled",
"bedrock_agent_guardrail_enabled"
"bedrock_guardrail_sensitive_information_filter_enabled"
]
},
{
@@ -6485,9 +6485,9 @@
}
],
"Checks": [
"bedrock_agent_guardrail_enabled",
"bedrock_guardrail_prompt_attack_filter_enabled",
"bedrock_guardrail_sensitive_information_filter_enabled",
"bedrock_agent_guardrail_enabled"
"bedrock_guardrail_sensitive_information_filter_enabled"
]
},
{
@@ -6546,8 +6546,8 @@
}
],
"Checks": [
"bedrock_guardrail_sensitive_information_filter_enabled",
"bedrock_agent_guardrail_enabled"
"bedrock_agent_guardrail_enabled",
"bedrock_guardrail_sensitive_information_filter_enabled"
]
},
{
@@ -6606,8 +6606,8 @@
}
],
"Checks": [
"bedrock_guardrail_sensitive_information_filter_enabled",
"bedrock_agent_guardrail_enabled"
"bedrock_agent_guardrail_enabled",
"bedrock_guardrail_sensitive_information_filter_enabled"
]
},
{

@@ -2894,8 +2894,10 @@
"bedrock_agent_guardrail_enabled",
"bedrock_guardrail_prompt_attack_filter_enabled",
"bedrock_guardrail_sensitive_information_filter_enabled",
"bedrock_guardrails_configured",
"bedrock_model_invocation_logging_enabled",
"bedrock_model_invocation_logs_encryption_enabled",
"bedrock_prompt_management_exists",
"cloudformation_stack_outputs_find_secrets",
"cloudfront_distributions_custom_ssl_certificate",
"cloudfront_distributions_default_root_object",

@@ -2898,8 +2898,10 @@
"bedrock_agent_guardrail_enabled",
"bedrock_guardrail_prompt_attack_filter_enabled",
"bedrock_guardrail_sensitive_information_filter_enabled",
"bedrock_guardrails_configured",
"bedrock_model_invocation_logging_enabled",
"bedrock_model_invocation_logs_encryption_enabled",
"bedrock_prompt_management_exists",
"cloudformation_stack_outputs_find_secrets",
"cloudfront_distributions_custom_ssl_certificate",
"cloudfront_distributions_default_root_object",

@@ -2276,9 +2276,9 @@
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting thegeneration of visual flow diagrams to aid with analyzing for lateral movement, etc.",
|
||||
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor.",
"RemediationProcedure": "From Azure Portal 1. Navigate to Network Watcher. 2. Select NSG flow logs. 3. Select + Create. 4. Select the desired Subscription. 5. Select + Select NSG. 6. Select a network security group. 7. Click Confirm selection. 8. Select or create a new Storage Account. 9. Input the retention in days to retain the log. 10. Click Next. 11. Under Configuration, select Version 2. 12. If rich analytics are required, select Enable Traffic Analytics, a processing interval, and a Log Analytics Workspace. 13. Select Next. 14. Optionally add Tags. 15. Select Review + create. 16. Select Create. Warning The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
"AuditProcedure": "From Azure Portal 1. Navigate to Network Watcher. 2. Select NSG flow logs 3. For each log you wish to audit select it from this view.",
"AdditionalInformation": "",
"RemediationProcedure": "From Azure Portal Existing NSG flow logs can still be reviewed under Network Watcher > Flow logs. If you already have NSG flow logs configured, ensure they remain enabled and that Traffic Analytics sends data to a Log Analytics Workspace until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create Virtual network flow logs instead: 1. Navigate to Network Watcher. 2. Select Flow logs. 3. Select + Create. 4. Select the desired Subscription. 5. For Flow log type, select Virtual network. 6. Select + Select target resource. 7. Select a virtual network. 8. Click Confirm selection. 9. Select or create a new Storage Account. 10. Input the retention in days to retain the log. 11. Click Next. 12. Under Analytics, select Version 2, enable Traffic Analytics, and select a Log Analytics Workspace. 13. Select Next. 14. Optionally add Tags. 15. Select Review + create. 16. Select Create.",
"AuditProcedure": "From Azure Portal 1. Navigate to Network Watcher. 2. Select Flow logs. 3. Review existing Network security group flow logs, if any remain, to ensure they are enabled and configured to send logs to a Log Analytics Workspace. 4. Review Virtual network flow logs for new or migrated coverage.",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://docs.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation"
}
@@ -2702,9 +2702,9 @@
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days.",
"RationaleStatement": "Flow logs enable capturing information about IP traffic flowing in and out of network security groups. Logs can be used to check for anomalies and give insight into suspected breaches.",
"ImpactStatement": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use.",
"RemediationProcedure": "From Azure Portal 1. Go to Network Watcher 2. Select NSG flow logs blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure Status is set to On 5. Ensure Retention (days) setting greater than 90 days 6. Select your storage account in the Storage account field 7. Select Save From Azure CLI Enable the NSG flow logs and set the Retention (days) to greater than or equal to 90 days. az network watcher flow-log configure --nsg <NameorID of the Network Security Group> --enabled true --resource-group <resourceGroupName> --retention 91 -- storage-account <NameorID of the storage account to save flow logs>",
"AuditProcedure": "From Azure Portal 1. Go to Network Watcher 2. Select NSG flow logs blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure Status is set to On 5. Ensure Retention (days) setting greater than 90 days From Azure CLI az network watcher flow-log show --resource-group <resourceGroup> --nsg <NameorID of the NetworkSecurityGroup> --query 'retentionPolicy' Ensure that enabled is set to true and days is set to greater then or equal to 90.",
"AdditionalInformation": "",
"RemediationProcedure": "From Azure Portal Existing NSG flow logs can still be reviewed under Network Watcher > Flow logs. If you already have NSG flow logs configured, ensure Status is set to On and Retention (days) is set to 0, 90, or a number greater than 90 until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, configure Virtual network flow logs instead and set Retention days to 0, 90, or a number greater than 90. From Azure CLI Update an existing flow log retention policy with az network watcher flow-log update --location <location> --name <flow-log> --retention <number-of-days>.",
"AuditProcedure": "From Azure Portal 1. Go to Network Watcher. 2. Select Flow logs. 3. Review existing Network security group flow logs, if any remain, and ensure Status is set to On and Retention (days) is set to 0, 90, or a number greater than 90. 4. Review Virtual network flow logs for new or migrated coverage. From Azure CLI az network watcher flow-log list --location <location> --query [*].[name,retentionPolicy,targetResourceId] Ensure each relevant flow log has retention days set to 0, 90, or a number greater than 90.",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
"DefaultValue": "By default, Network Security Group Flow Logs are disabled.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview:https://docs.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest:https://docs.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-logging-threat-detection#lt-6-configure-log-storage-retention"
}

@@ -2241,9 +2241,9 @@
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor.",
"RemediationProcedure": "**From Azure Portal** 1. Navigate to `Network Watcher`. 1. Select `NSG flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. Select `+ Select NSG`. 1. Select a network security group. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. Input the retention in days to retain the log. 1. Click `Next`. 1. Under `Configuration`, select `Version 2`. 1. If rich analytics are required, select `Enable Traffic Analytics`, a processing interval, and a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`. ***Warning*** The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
"AuditProcedure": "**From Azure Portal** 1. Navigate to `Network Watcher`. 1. Select `NSG flow logs` 1. For each log you wish to audit select it from this view. **From Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
"AdditionalInformation": "",
"RemediationProcedure": "**From Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure they remain enabled and that `Traffic Analytics` sends data to a `Log Analytics Workspace` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create `Virtual network` flow logs instead: 1. Navigate to `Network Watcher`. 1. Select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Virtual network`. 1. Select `+ Select target resource`. 1. Select `Virtual network`. 1. Select a virtual network. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. Input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`.",
"AuditProcedure": "**From Azure Portal** 1. Navigate to `Network Watcher`. 1. Select `Flow logs`. 1. Review existing `Network security group` flow logs, if any remain, to ensure they are enabled and configured to send logs to a `Log Analytics Workspace`. 1. Review `Virtual network` flow logs for new or migrated coverage. **From Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation"
}
@@ -2627,9 +2627,9 @@
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days.",
"RationaleStatement": "Flow logs enable capturing information about IP traffic flowing in and out of network security groups. Logs can be used to check for anomalies and give insight into suspected breaches.",
"ImpactStatement": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use.",
"RemediationProcedure": "**From Azure Portal** 1. Go to `Network Watcher` 2. Select `NSG flow logs` blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure `Status` is set to `On` 5. Ensure `Retention (days)` setting `greater than 90 days` 6. Select your storage account in the `Storage account` field 7. Select `Save` **From Azure CLI** Enable the `NSG flow logs` and set the Retention (days) to greater than or equal to 90 days. ``` az network watcher flow-log configure --nsg <NameorID of the Network Security Group> --enabled true --resource-group <resourceGroupName> --retention 91 --storage-account <NameorID of the storage account to save flow logs> ```",
"AuditProcedure": "**From Azure Portal** 1. Go to `Network Watcher` 2. Select `NSG flow logs` blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure `Status` is set to `On` 5. Ensure `Retention (days)` setting `greater than 90 days` **From Azure CLI** ``` az network watcher flow-log show --resource-group <resourceGroup> --nsg <NameorID of the NetworkSecurityGroup> --query 'retentionPolicy' ``` Ensure that `enabled` is set to `true` and `days` is set to `greater then or equal to 90`. **From Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
"AdditionalInformation": "",
"RemediationProcedure": "**From Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, configure `Virtual network` flow logs instead and set `Retention days` to `0`, `90`, or a number greater than `90`. **From Azure CLI** Update an existing flow log retention policy with: ``` az network watcher flow-log update --location <location> --name <flow-log> --retention <number-of-days> ```",
"AuditProcedure": "**From Azure Portal** 1. Go to `Network Watcher`. 1. Select `Flow logs`. 1. Review existing `Network security group` flow logs, if any remain, and ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90`. 1. Review `Virtual network` flow logs for new or migrated coverage. **From Azure CLI** ``` az network watcher flow-log list --location <location> --query [*].[name,retentionPolicy,targetResourceId] ``` Ensure each relevant flow log has retention days set to `0`, `90`, or a number greater than `90`. **From Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
"DefaultValue": "By default, Network Security Group Flow Logs are `disabled`.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview:https://docs.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-6-configure-log-storage-retention"
}

@@ -2548,9 +2548,9 @@
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace.This will increase the amount of data stored and used by Azure Monitor.",
|
||||
"RemediationProcedure": "**Remediate from Azure Portal**1. Navigate to `Network Watcher`.1. Under `Logs`, select `Flow logs`.1. Select `+ Create`.1. Select the desired Subscription.1. For `Flow log type`, select `Network security group`.1. Select `+ Select target resource`.1. Select `Network security group`.1. Select a network security group.1. Click `Confirm selection`.1. Select or create a new Storage Account.1. If using a v2 storage account, input the retention in days to retain the log.1. Click `Next`.1. Under `Analytics`, for `Flow log version`, select `Version 2`.1. Check the box next to `Enable traffic analytics`.1. Select a processing interval.1. Select a `Log Analytics Workspace`.1. Select `Next`.1. Optionally add Tags.1. Select `Review + create`.1. Select `Create`.***Warning***The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
"AuditProcedure": "**Audit from Azure Portal**1. Navigate to `Network Watcher`.1. Under `Logs`, select `Flow logs`.1. Click `Add filter`.1. From the `Filter` drop-down, select `Flow log type`.1. From the `Value` drop-down, check `Network security group` only.1. Click `Apply`.1. Ensure that at least one network security group flow log is listed and is configured to send logs to a `Log Analytics Workspace`.**Audit from Azure Policy**If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure.If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions- **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state'- **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group'- **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
"AdditionalInformation": "",
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure they remain enabled and that `Traffic Analytics` sends data to a `Log Analytics Workspace` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create `Virtual network` flow logs instead: 1. Navigate to `Network Watcher`.1. Under `Logs`, select `Flow logs`.1. Select `+ Create`.1. Select the desired Subscription.1. For `Flow log type`, select `Virtual network`.1. Select `+ Select target resource`.1. Select `Virtual network`.1. Select a virtual network.1. Click `Confirm selection`.1. Select or create a new Storage Account.1. If using a v2 storage account, input the retention in days to retain the log.1. Click `Next`.1. Under `Analytics`, for `Flow log version`, select `Version 2`.1. Check the box next to `Enable traffic analytics`.1. Select a processing interval.1. Select a `Log Analytics Workspace`.1. Select `Next`.1. Optionally add Tags.1. Select `Review + create`.1. Select `Create`.",
"AuditProcedure": "**Audit from Azure Portal**1. Navigate to `Network Watcher`.1. Under `Logs`, select `Flow logs`.1. Click `Add filter`.1. From the `Filter` drop-down, select `Flow log type`.1. Review existing `Network security group` flow logs, if any remain, to ensure they are enabled and configured to send logs to a `Log Analytics Workspace`.1. Review `Virtual network` flow logs for new or migrated coverage.**Audit from Azure Policy**If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure.If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions- **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state'- **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group'- **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation"
}
@@ -2934,9 +2934,9 @@
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days.",
"RationaleStatement": "Flow logs enable capturing information about IP traffic flowing in and out of network security groups. Logs can be used to check for anomalies and give insight into suspected breaches.",
"ImpactStatement": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use.",
"RemediationProcedure": "**Remediate from Azure Portal**1. Go to `Network Watcher`2. Select `NSG flow logs` blade in the Logs section3. Select each Network Security Group from the list4. Ensure `Status` is set to `On`5. Ensure `Retention (days)` setting `greater than 90 days`6. Select your storage account in the `Storage account` field7. Select `Save`**Remediate from Azure CLI**Enable the `NSG flow logs` and set the Retention (days) to greater than or equal to 90 days.```az network watcher flow-log configure --nsg <NameorID of the Network Security Group> --enabled true --resource-group <resourceGroupName> --retention 91 --storage-account <NameorID of the storage account to save flow logs>```",
"AuditProcedure": "**Audit from Azure Portal**1. Go to `Network Watcher`2. Select `NSG flow logs` blade in the Logs section3. Select each Network Security Group from the list4. Ensure `Status` is set to `On`5. Ensure `Retention (days)` setting `greater than 90 days`**Audit from Azure CLI**```az network watcher flow-log show --resource-group <resourceGroup> --nsg <NameorID of the NetworkSecurityGroup> --query 'retentionPolicy'```Ensure that `enabled` is set to `true` and `days` is set to `greater then or equal to 90`.**Audit from Azure Policy**If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure.If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions- **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
"AdditionalInformation": "",
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, configure `Virtual network` flow logs instead and set `Retention days` to `0`, `90`, or a number greater than `90`.**Remediate from Azure CLI**Update an existing flow log retention policy with:```az network watcher flow-log update --location <location> --name <flow-log> --retention <number-of-days>```",
"AuditProcedure": "**Audit from Azure Portal**1. Go to `Network Watcher`.1. Select `Flow logs`.1. Review existing `Network security group` flow logs, if any remain, and ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90`.1. Review `Virtual network` flow logs for new or migrated coverage.**Audit from Azure CLI**```az network watcher flow-log list --location <location> --query [*].[name,retentionPolicy,targetResourceId]```Ensure each relevant flow log has retention days set to `0`, `90`, or a number greater than `90`.**Audit from Azure Policy**If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure.If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions- **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies.",
"DefaultValue": "By default, Network Security Group Flow Logs are `disabled`.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview:https://docs.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-6-configure-log-storage-retention"
}

@@ -1302,9 +1302,9 @@
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace. **Retirement Notice** On September 30, 2027, network security group (NSG) flow logs will be retired. Starting June 30, 2025, it will no longer be possible to create new NSG flow logs. Azure recommends migrating to virtual network flow logs. Review https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement for more information. For virtual network flow logs, consider applying the recommendation `Ensure that virtual network flow logs are captured and sent to Log Analytics` in this section.",
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor.",
"RemediationProcedure": "**Remediate from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Network security group`. 1. Select `+ Select target resource`. 1. Select `Network security group`. 1. Select a network security group. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. If using a v2 storage account, input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`. ***Warning*** The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
"AuditProcedure": "**Audit from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Click `Add filter`. 1. From the `Filter` drop-down, select `Flow log type`. 1. From the `Value` drop-down, check `Network security group` only. 1. Click `Apply`. 1. Ensure that at least one network security group flow log is listed and is configured to send logs to a `Log Analytics Workspace`. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
"AdditionalInformation": "",
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure they remain enabled and that `Traffic Analytics` sends data to a `Log Analytics Workspace` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create `Virtual network` flow logs instead: 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Virtual network`. 1. Select `+ Select target resource`. 1. Select `Virtual network`. 1. Select a virtual network. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. If using a v2 storage account, input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`.",
"AuditProcedure": "**Audit from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Click `Add filter`. 1. From the `Filter` drop-down, select `Flow log type`. 1. Review existing `Network security group` flow logs, if any remain, to ensure they are enabled and configured to send logs to a `Log Analytics Workspace`. 1. Review `Virtual network` flow logs for new or migrated coverage. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies. For details, see the official announcement: https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation",
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics."
}
@@ -1789,9 +1789,9 @@
"Description": "Network Security Group Flow Logs should be enabled and the retention period set to greater than or equal to 90 days. **Retirement Notice** On September 30, 2027, network security group (NSG) flow logs will be retired. Starting June 30, 2025, it will no longer be possible to create new NSG flow logs. Azure recommends migrating to virtual network flow logs. Review https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement for more information. For virtual network flow logs, consider applying the recommendation `Ensure that virtual network flow log retention days is set to greater than or equal to 90` in this section.",
"RationaleStatement": "Flow logs enable capturing information about IP traffic flowing in and out of network security groups. Logs can be used to check for anomalies and give insight into suspected breaches.",
"ImpactStatement": "This will keep IP traffic logs for longer than 90 days. As a level 2, first determine your need to retain data, then apply your selection here. As this is data stored for longer, your monthly storage costs will increase depending on your data use.",
"RemediationProcedure": "**Remediate from Azure Portal** 1. Go to `Network Watcher` 2. Select `NSG flow logs` blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure `Status` is set to `On` 5. Ensure `Retention (days)` setting `greater than 90 days` 6. Select your storage account in the `Storage account` field 7. Select `Save` **Remediate from Azure CLI** Enable the `NSG flow logs` and set the Retention (days) to greater than or equal to 90 days. ``` az network watcher flow-log configure --nsg <NameorID of the Network Security Group> --enabled true --resource-group <resourceGroupName> --retention 91 --storage-account <NameorID of the storage account to save flow logs> ```",
"AuditProcedure": "**Audit from Azure Portal** 1. Go to `Network Watcher` 2. Select `NSG flow logs` blade in the Logs section 3. Select each Network Security Group from the list 4. Ensure `Status` is set to `On` 5. Ensure `Retention (days)` setting `greater than 90 days` **Audit from Azure CLI** ``` az network watcher flow-log show --resource-group <resourceGroup> --nsg <NameorID of the NetworkSecurityGroup> --query 'retentionPolicy' ``` Ensure that `enabled` is set to `true` and `days` is set to `greater then or equal to 90`. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
"AdditionalInformation": "",
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, configure `Virtual network` flow logs instead and set `Retention days` to `0`, `90`, or a number greater than `90`. **Remediate from Azure CLI** Update an existing flow log retention policy with: ``` az network watcher flow-log update --location <location> --name <flow-log> --retention <number-of-days> ```",
"AuditProcedure": "**Audit from Azure Portal** 1. Go to `Network Watcher`. 1. Select `Flow logs`. 1. Review existing `Network security group` flow logs, if any remain, and ensure `Status` is set to `On` and `Retention (days)` is set to `0`, `90`, or a number greater than `90`. 1. Review `Virtual network` flow logs for new or migrated coverage. **Audit from Azure CLI** ``` az network watcher flow-log list --location <location> --query [*].[name,retentionPolicy,targetResourceId] ``` Ensure each relevant flow log has retention days set to `0`, `90`, or a number greater than `90`. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [5e1cd26a-5090-4fdb-9d6a-84a90335e22d](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F5e1cd26a-5090-4fdb-9d6a-84a90335e22d) **- Name:** 'Configure network security groups to use specific workspace, storage account and flowlog retention policy for traffic analytics'",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies. For details, see the official announcement: https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview:https://docs.microsoft.com/en-us/cli/azure/network/watcher/flow-log?view=azure-cli-latest:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-6-configure-log-storage-retention",
"DefaultValue": "By default, Network Security Group Flow Logs are `disabled`."
}
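
The retention rule above (0 for unlimited, otherwise at least 90 days) can be exercised directly against the output of the `az network watcher flow-log list` call referenced in the audit procedure. The following is a minimal illustrative sketch, not the actual `network_flow_log_more_than_90_days` check; it assumes the Azure CLI is installed and authenticated, and that `retentionPolicy` carries the `days`/`enabled` fields shown in the audit text.

```python
# Illustrative sketch only -- not the Prowler check implementation.
# Flags flow logs whose retention is neither 0 (unlimited) nor >= 90 days,
# using the same CLI call shown in the audit procedure above.
import json
import subprocess


def noncompliant_flow_logs(location: str) -> list[str]:
    raw = subprocess.check_output(
        [
            "az", "network", "watcher", "flow-log", "list",
            "--location", location,
            "--query", "[*].[name,retentionPolicy]",
            "--output", "json",
        ]
    )
    failing = []
    for name, retention in json.loads(raw):
        days = (retention or {}).get("days") or 0
        if days != 0 and days < 90:
            failing.append(name)
    return failing


print(noncompliant_flow_logs("eastus"))
```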
@@ -1292,9 +1292,9 @@
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace. **Retirement Notice** On September 30, 2027, network security group (NSG) flow logs will be retired. Starting June 30, 2025, it will no longer be possible to create new NSG flow logs. Azure recommends migrating to virtual network flow logs. Review https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement for more information. For virtual network flow logs, consider applying the recommendation `Ensure that virtual network flow logs are captured and sent to Log Analytics` in this section.",
"RationaleStatement": "Network Flow Logs provide valuable insight into the flow of traffic around your network and feed into both Azure Monitor and Azure Sentinel (if in use), permitting the generation of visual flow diagrams to aid with analyzing for lateral movement, etc.",
"ImpactStatement": "The impact of configuring NSG Flow logs is primarily one of cost and configuration. If deployed, it will create storage accounts that hold minimal amounts of data on a 5-day lifecycle before feeding to Log Analytics Workspace. This will increase the amount of data stored and used by Azure Monitor.",
"RemediationProcedure": "**Remediate from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Network security group`. 1. Select `+ Select target resource`. 1. Select `Network security group`. 1. Select a network security group. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. If using a v2 storage account, input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`. ***Warning*** The remediation policy creates remediation deployment and names them by concatenating the subscription name and the resource group name. The MAXIMUM permitted length of a deployment name is 64 characters. Exceeding this will cause the remediation task to fail.",
"AuditProcedure": "**Audit from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Click `Add filter`. 1. From the `Filter` drop-down, select `Flow log type`. 1. From the `Value` drop-down, check `Network security group` only. 1. Click `Apply`. 1. Ensure that at least one network security group flow log is listed and is configured to send logs to a `Log Analytics Workspace`. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
"AdditionalInformation": "",
"RemediationProcedure": "**Remediate from Azure Portal** Existing NSG flow logs can still be reviewed under `Network Watcher` > `Flow logs`. If you already have NSG flow logs configured, ensure they remain enabled and that `Traffic Analytics` sends data to a `Log Analytics Workspace` until migration is complete. Azure no longer allows creation of new NSG flow logs after June 30, 2025. For new or migrated deployments, create `Virtual network` flow logs instead: 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Select `+ Create`. 1. Select the desired Subscription. 1. For `Flow log type`, select `Virtual network`. 1. Select `+ Select target resource`. 1. Select `Virtual network`. 1. Select a virtual network. 1. Click `Confirm selection`. 1. Select or create a new Storage Account. 1. If using a v2 storage account, input the retention in days to retain the log. 1. Click `Next`. 1. Under `Analytics`, for `Flow log version`, select `Version 2`. 1. Check the box next to `Enable traffic analytics`. 1. Select a processing interval. 1. Select a `Log Analytics Workspace`. 1. Select `Next`. 1. Optionally add Tags. 1. Select `Review + create`. 1. Select `Create`.",
"AuditProcedure": "**Audit from Azure Portal** 1. Navigate to `Network Watcher`. 1. Under `Logs`, select `Flow logs`. 1. Click `Add filter`. 1. From the `Filter` drop-down, select `Flow log type`. 1. Review existing `Network security group` flow logs, if any remain, to ensure they are enabled and configured to send logs to a `Log Analytics Workspace`. 1. Review `Virtual network` flow logs for new or migrated coverage. **Audit from Azure Policy** If referencing a digital copy of this Benchmark, clicking a Policy ID will open a link to the associated Policy definition in Azure. If referencing a printed copy, you can search Policy IDs from this URL: https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyMenuBlade/~/Definitions - **Policy ID:** [27960feb-a23c-4577-8d36-ef8b5f35e0be](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F27960feb-a23c-4577-8d36-ef8b5f35e0be) **- Name:** 'All flow log resources should be in enabled state' - **Policy ID:** [c251913d-7d24-4958-af87-478ed3b9ba41](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fc251913d-7d24-4958-af87-478ed3b9ba41) **- Name:** 'Flow logs should be configured for every network security group' - **Policy ID:** [4c3c6c5f-0d47-4402-99b8-aa543dd8bcee](https://portal.azure.com/#view/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2F4c3c6c5f-0d47-4402-99b8-aa543dd8bcee) **- Name:** 'Flow logs should be configured for every virtual network'",
"AdditionalInformation": "On September 30, 2027, NSG flow logs will be retired, and creating new NSG flow logs has not been possible since June 30, 2025. Azure recommends migrating to virtual network flow logs, which address NSG flow log limitations. After retirement, traffic analytics using NSG flow logs will no longer be supported, and existing NSG flow log resources will be deleted. Previously collected NSG flow log records will remain available per their retention policies. For details, see the official announcement: https://azure.microsoft.com/en-gb/updates?id=Azure-NSG-flow-logs-Retirement.",
"References": "https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-portal:https://learn.microsoft.com/en-us/security/benchmark/azure/mcsb-logging-threat-detection#lt-4-enable-network-logging-for-security-investigation",
"DefaultValue": "By default Network Security Group logs are not sent to Log Analytics."
}
@@ -709,17 +709,17 @@
},
{
    "Id": "3.1.8",
    "Description": "Ensure that Network Security Group Flow logs are captured and sent to Log Analytics",
    "Description": "Ensure that Network Watcher flow logs are captured and sent to Log Analytics",
    "Checks": [
        "network_flow_log_captured_sent"
    ],
    "Attributes": [
        {
            "Title": "Network Security Group Flow logs are captured and sent to Log Analytics",
            "Title": "Network Watcher flow logs are captured and sent to Log Analytics",
            "Section": "3. Logging and Monitoring",
            "SubSection": "3.1 Logging",
            "AttributeDescription": "Ensure that network flow logs are collected and sent to a central Log Analytics workspace for monitoring and analysis.",
            "AdditionalInformation": "Capturing network flow logs provides visibility into traffic patterns across your network, helping detect anomalies, potential lateral movement, and security threats. These logs integrate with Azure Monitor and Azure Sentinel, enabling advanced analytics and visualization for improved network security and incident response.",
            "AttributeDescription": "Ensure that Network Watcher flow logs for supported targets, such as virtual networks and network security groups, are collected and sent to a central Log Analytics workspace for monitoring and analysis.",
            "AdditionalInformation": "Capturing Network Watcher flow logs provides visibility into traffic patterns across your network, helping detect anomalies, potential lateral movement, and security threats. These logs integrate with Azure Monitor and Azure Sentinel, enabling advanced analytics and visualization for improved network security and incident response. For new deployments, prefer virtual network flow logs because NSG flow logs are on the retirement path.",
            "LevelOfRisk": 4,
            "Weight": 100
        }
@@ -763,17 +763,17 @@
},
{
    "Id": "3.2.1",
    "Description": "Ensure that Network Security Group Flow Log retention period is 'greater than 90 days'",
    "Description": "Ensure that Network Watcher flow log retention period is '0 or at least 90 days'",
    "Checks": [
        "network_flow_log_more_than_90_days"
    ],
    "Attributes": [
        {
            "Title": "Network Security Group Flow Log retention period is 'greater than 90 days'",
            "Title": "Network Watcher flow log retention period is '0 or at least 90 days'",
            "Section": "3. Logging and Monitoring",
            "SubSection": "3.2 Retention",
            "AttributeDescription": "Enable Network Security Group (NSG) Flow Logs and configure the retention period to at least 90 days to capture and store IP traffic data for security monitoring and analysis.",
            "AdditionalInformation": "NSG Flow Logs provide visibility into network traffic, helping detect anomalies, unauthorized access, and potential security breaches. Retaining logs for at least 90 days ensures that historical data is available for incident investigation, compliance, and forensic analysis, strengthening overall network security monitoring.",
            "AttributeDescription": "Enable Network Watcher flow logs for supported targets, such as virtual networks and network security groups, and configure the retention period to 0 for unlimited retention or at least 90 days to capture and store IP traffic data for security monitoring and analysis.",
            "AdditionalInformation": "Network Watcher flow logs provide visibility into network traffic, helping detect anomalies, unauthorized access, and potential security breaches. Retaining logs for 0 days (unlimited) or at least 90 days ensures that historical data is available for incident investigation, compliance, and forensic analysis, strengthening overall network security monitoring. For new deployments, prefer virtual network flow logs because NSG flow logs are on the retirement path.",
            "LevelOfRisk": 3,
            "Weight": 10
        }
@@ -653,7 +653,9 @@
{
    "Id": "3.1.3.4.1.1",
    "Description": "Ensure protection against encrypted attachments from untrusted senders is enabled",
    "Checks": [],
    "Checks": [
        "gmail_encrypted_attachment_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "3 Apps",

@@ -674,7 +676,9 @@
{
    "Id": "3.1.3.4.1.2",
    "Description": "Ensure protection against attachments with scripts from untrusted senders is enabled",
    "Checks": [],
    "Checks": [
        "gmail_script_attachment_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "3 Apps",

@@ -695,7 +699,9 @@
{
    "Id": "3.1.3.4.1.3",
    "Description": "Ensure protection against anomalous attachment types in emails is enabled",
    "Checks": [],
    "Checks": [
        "gmail_anomalous_attachment_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "3 Apps",

@@ -785,7 +791,9 @@
{
    "Id": "3.1.3.4.3.1",
    "Description": "Ensure protection against domain spoofing based on similar domain names is enabled",
    "Checks": [],
    "Checks": [
        "gmail_domain_spoofing_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "3 Apps",

@@ -806,7 +814,9 @@
{
    "Id": "3.1.3.4.3.2",
    "Description": "Ensure protection against spoofing of employee names is enabled",
    "Checks": [],
    "Checks": [
        "gmail_employee_name_spoofing_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "3 Apps",

@@ -827,7 +837,9 @@
{
    "Id": "3.1.3.4.3.3",
    "Description": "Ensure protection against inbound emails spoofing your domain is enabled",
    "Checks": [],
    "Checks": [
        "gmail_inbound_domain_spoofing_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "3 Apps",

@@ -848,7 +860,9 @@
{
    "Id": "3.1.3.4.3.4",
    "Description": "Ensure protection against any unauthenticated emails is enabled",
    "Checks": [],
    "Checks": [
        "gmail_unauthenticated_email_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "3 Apps",

@@ -869,7 +883,9 @@
{
    "Id": "3.1.3.4.3.5",
    "Description": "Ensure groups are protected from inbound emails spoofing your domain",
    "Checks": [],
    "Checks": [
        "gmail_groups_spoofing_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "3 Apps",
@@ -649,7 +649,9 @@
{
    "Id": "GWS.GMAIL.5.1",
    "Description": "Protect against encrypted attachments from untrusted senders SHALL be enabled",
    "Checks": [],
    "Checks": [
        "gmail_encrypted_attachment_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "Gmail",

@@ -662,7 +664,9 @@
{
    "Id": "GWS.GMAIL.5.2",
    "Description": "Protect against attachments with scripts from untrusted senders SHALL be enabled",
    "Checks": [],
    "Checks": [
        "gmail_script_attachment_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "Gmail",

@@ -675,7 +679,9 @@
{
    "Id": "GWS.GMAIL.5.3",
    "Description": "Protect against anomalous attachment types in emails SHALL be enabled",
    "Checks": [],
    "Checks": [
        "gmail_anomalous_attachment_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "Gmail",

@@ -798,7 +804,9 @@
{
    "Id": "GWS.GMAIL.7.1",
    "Description": "Protect against domain spoofing based on similar domain names SHALL be enabled",
    "Checks": [],
    "Checks": [
        "gmail_domain_spoofing_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "Gmail",

@@ -811,7 +819,9 @@
{
    "Id": "GWS.GMAIL.7.2",
    "Description": "Protect against spoofing of employee names SHALL be enabled",
    "Checks": [],
    "Checks": [
        "gmail_employee_name_spoofing_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "Gmail",

@@ -824,7 +834,9 @@
{
    "Id": "GWS.GMAIL.7.3",
    "Description": "Protect against inbound emails spoofing your domain SHALL be enabled",
    "Checks": [],
    "Checks": [
        "gmail_inbound_domain_spoofing_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "Gmail",

@@ -837,7 +849,9 @@
{
    "Id": "GWS.GMAIL.7.4",
    "Description": "Protect against any unauthenticated emails SHALL be enabled",
    "Checks": [],
    "Checks": [
        "gmail_unauthenticated_email_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "Gmail",

@@ -850,7 +864,9 @@
{
    "Id": "GWS.GMAIL.7.5",
    "Description": "Protect your Groups from inbound emails spoofing your domain SHALL be enabled",
    "Checks": [],
    "Checks": [
        "gmail_groups_spoofing_protection_enabled"
    ],
    "Attributes": [
        {
            "Section": "Gmail",
@@ -48,7 +48,7 @@ class _MutableTimestamp:

timestamp = _MutableTimestamp(datetime.today())
timestamp_utc = _MutableTimestamp(datetime.now(timezone.utc))
prowler_version = "5.25.0"
prowler_version = "5.26.0"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://raw.githubusercontent.com/prowler-cloud/prowler/dc7d2d5aeb92fdf12e8604f42ef6472cd3e8e889/docs/img/prowler-logo-black.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
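
For context on the hunk above: `datetime.today()` produces a naive local timestamp, while `datetime.now(timezone.utc)` is timezone-aware. A minimal sketch of the difference, independent of Prowler:

```python
# Naive local time vs. timezone-aware UTC time.
from datetime import datetime, timezone

naive_local = datetime.today()          # tzinfo is None; implicit local time
aware_utc = datetime.now(timezone.utc)  # tzinfo is UTC

print(naive_local.tzinfo)      # None
print(aware_utc.tzinfo)        # UTC
print(aware_utc.isoformat())   # e.g. 2026-01-01T12:00:00.000000+00:00
```

Only the aware value serializes with an explicit offset, which keeps timestamps unambiguous across machines in different time zones.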
@@ -87,8 +87,8 @@ def get_available_compliance_frameworks(provider=None):
    providers = [p.value for p in Provider]
    if provider:
        providers = [provider]
    for provider in providers:
        compliance_dir = f"{actual_directory}/../compliance/{provider}"
    for current_provider in providers:
        compliance_dir = f"{actual_directory}/../compliance/{current_provider}"
        if not os.path.isdir(compliance_dir):
            continue
        with os.scandir(compliance_dir) as files:
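
The rename to `current_provider` above avoids rebinding the `provider` parameter inside the loop. A minimal standalone reproduction of the bug being fixed (illustrative names only, not Prowler code):

```python
# Why shadowing the parameter matters: after the loop, `provider` no longer
# holds the caller's argument.
def frameworks_for(provider=None):
    providers = ["aws", "azure", "gcp"]
    if provider:
        providers = [provider]
    for provider in providers:  # rebinds the parameter on every iteration
        pass
    # Any code below that re-reads the parameter (e.g. to filter
    # multi-provider frameworks) now sees the last iterated value.
    return provider


assert frameworks_for() == "gcp"  # caller passed None, but gets "gcp" back
```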
@@ -97,7 +97,9 @@ def get_available_compliance_frameworks(provider=None):
                    available_compliance_frameworks.append(
                        file.name.removesuffix(".json")
                    )
    # Also scan top-level compliance/ for multi-provider JSONs
    # Also scan top-level compliance/ for multi-provider (universal) JSONs.
    # When a specific provider was requested, only include the framework if it
    # declares support for that provider; otherwise include all universal frameworks.
    compliance_root = f"{actual_directory}/../compliance"
    if os.path.isdir(compliance_root):
        with os.scandir(compliance_root) as files:
@@ -299,12 +299,22 @@ def print_compliance_frameworks(
def print_compliance_requirements(
    bulk_compliance_frameworks: dict, compliance_frameworks: list
):
    from prowler.lib.check.compliance_models import ComplianceFramework

    for compliance_framework in compliance_frameworks:
        for key in bulk_compliance_frameworks.keys():
            framework = bulk_compliance_frameworks[key].Framework
            provider = bulk_compliance_frameworks[key].Provider
            version = bulk_compliance_frameworks[key].Version
            requirements = bulk_compliance_frameworks[key].Requirements
            entry = bulk_compliance_frameworks[key]
            is_universal = isinstance(entry, ComplianceFramework)
            if is_universal:
                framework = entry.framework
                provider = entry.provider or "Multi-provider"
                version = entry.version
                requirements = entry.requirements
            else:
                framework = entry.Framework
                provider = entry.Provider or "Multi-provider"
                version = entry.Version
                requirements = entry.Requirements
            # We can list the compliance requirements for a given framework using the
            # bulk_compliance_frameworks keys since they are the compliance specification file name
            if compliance_framework == key:

@@ -313,10 +323,23 @@
                )
                for requirement in requirements:
                    checks = ""
                    for check in requirement.Checks:
                        checks += f" {Fore.YELLOW}\t\t{check}\n{Style.RESET_ALL}"
                    if is_universal:
                        req_checks = requirement.checks
                        req_id = requirement.id
                        req_description = requirement.description
                    else:
                        req_checks = requirement.Checks
                        req_id = requirement.Id
                        req_description = requirement.Description
                    if isinstance(req_checks, dict):
                        for prov, check_list in req_checks.items():
                            for check in check_list:
                                checks += f" {Fore.YELLOW}\t\t[{prov}] {check}\n{Style.RESET_ALL}"
                    else:
                        for check in req_checks:
                            checks += f" {Fore.YELLOW}\t\t{check}\n{Style.RESET_ALL}"
                    print(
                        f"Requirement Id: {Fore.MAGENTA}{requirement.Id}{Style.RESET_ALL}\n\t- Description: {requirement.Description}\n\t- Checks:\n{checks}"
                        f"Requirement Id: {Fore.MAGENTA}{req_id}{Style.RESET_ALL}\n\t- Description: {req_description}\n\t- Checks:\n{checks}"
                    )
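
The two branches above exist because legacy (CamelCase) requirements carry a flat list of checks, while universal (snake_case) requirements key their checks by provider. A sketch of the two shapes as assumed from the diff, with placeholder check IDs:

```python
# Hypothetical data shapes only; check IDs are placeholders, not real checks.
legacy_requirement = {
    "Id": "1.1",
    "Description": "Example requirement",
    "Checks": ["example_check"],  # flat list
}
universal_requirement = {
    "id": "1.1",
    "description": "Example requirement",
    "checks": {  # dict keyed by provider
        "aws": ["example_aws_check"],
        "azure": ["example_azure_check"],
    },
}
```

The `isinstance(req_checks, dict)` test above is what routes each shape to the matching rendering loop.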
@@ -726,8 +749,11 @@ def execute(
        if global_provider.type == "cloudflare":
            is_finding_muted_args["account_id"] = finding.account_id
        if global_provider.type == "azure":
            is_finding_muted_args["subscription_id"] = (
                global_provider.identity.subscriptions.get(finding.subscription)
            is_finding_muted_args["subscription_id"] = finding.subscription
            is_finding_muted_args["subscription_name"] = (
                global_provider.identity.subscriptions.get(
                    finding.subscription, finding.subscription
                )
            )
        is_finding_muted_args["finding"] = finding
        finding.muted = global_provider.mutelist.is_finding_muted(
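
The Azure branch above now passes the raw subscription ID as `subscription_id` and resolves a display name separately, falling back to the ID when no name is known. A minimal sketch of that fallback (names are illustrative):

```python
# Fallback behavior: unknown subscriptions reuse the ID as the name.
subscriptions = {"sub-123": "Production"}


def azure_mutelist_args(finding_subscription: str) -> dict:
    return {
        "subscription_id": finding_subscription,
        "subscription_name": subscriptions.get(
            finding_subscription, finding_subscription
        ),
    }


assert azure_mutelist_args("sub-123")["subscription_name"] == "Production"
assert azure_mutelist_args("sub-999")["subscription_name"] == "sub-999"
```

Passing both identifiers keeps mutelist rules matchable by either the subscription ID or its human-readable name.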
@@ -22,6 +22,7 @@ def load_checks_to_execute(
    categories: set = None,
    resource_groups: set = None,
    list_checks: bool = False,
    universal_frameworks: dict = None,
) -> set:
    """Generate the list of checks to execute based on the cloud provider and the input arguments given"""
    try:
@@ -155,12 +156,21 @@ def load_checks_to_execute(
        if not bulk_compliance_frameworks:
            bulk_compliance_frameworks = Compliance.get_bulk(provider=provider)
        for compliance_framework in compliance_frameworks:
            checks_to_execute.update(
                CheckMetadata.list(
                    bulk_compliance_frameworks=bulk_compliance_frameworks,
                    compliance_framework=compliance_framework,
            # Try universal frameworks first (snake_case dict-keyed checks)
            if (
                universal_frameworks
                and compliance_framework in universal_frameworks
            ):
                fw = universal_frameworks[compliance_framework]
                for req in fw.requirements:
                    checks_to_execute.update(req.checks.get(provider.lower(), []))
            elif compliance_framework in bulk_compliance_frameworks:
                checks_to_execute.update(
                    CheckMetadata.list(
                        bulk_compliance_frameworks=bulk_compliance_frameworks,
                        compliance_framework=compliance_framework,
                    )
                )
                )
            )

        # Handle if there are categories passed using --categories
        elif categories:
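
To illustrate the universal-framework branch added above: each requirement maps providers to check IDs, and `req.checks.get(provider.lower(), [])` quietly skips requirements that do not cover the current provider. A self-contained sketch with assumed shapes mirroring the diff:

```python
# Assumed shapes, mirroring the snake_case universal frameworks in the diff.
from types import SimpleNamespace

fw = SimpleNamespace(
    requirements=[
        SimpleNamespace(checks={"aws": ["check_one"], "azure": ["check_two"]}),
        SimpleNamespace(checks={"aws": ["check_three"]}),
    ]
)

checks_to_execute = set()
for req in fw.requirements:
    # .get() avoids KeyError for requirements without checks for this provider
    checks_to_execute.update(req.checks.get("aws", []))

assert checks_to_execute == {"check_one", "check_three"}
```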
Some files were not shown because too many files have changed in this diff.