Compare commits

...

16 Commits

Author SHA1 Message Date
Toni de la Fuente 5bb8383f99 feat(integrations): add Elasticsearch integration for OCSF findings
Enable sending OCSF-formatted security findings to Elasticsearch for
real-time analysis and visualization. Supports API key and basic auth,
TLS configuration, bulk indexing with batching, and fail-only filtering.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-10 00:09:28 +01:00
Utwo dd730eec94 feat(app): Helm chart for deploying prowler in k8s (#9835)
Co-authored-by: Cursor <cursoragent@cursor.com>
Co-authored-by: Daniel Barranquero <danielbo2001@gmail.com>
2026-02-09 16:43:12 +01:00
Alejandro Bailo afe2e0a09e fix(ui): guard against unknown provider types in ProviderTypeSelector (#9991) 2026-02-09 15:18:50 +01:00
Alejandro Bailo 507d163a50 docs(ui): mark changelog v1.18.1 as released with Prowler v5.18.1 (#9993) 2026-02-09 13:16:44 +01:00
Josema Camacho 530fef5106 chore(attack-paths): Internet node is now created during Attack Paths scans (#9992) 2026-02-09 12:17:51 +01:00
Josema Camacho 5cbbceb3be chore(attack-paths): improve Attack Paths queries attribution (#9983) 2026-02-09 11:07:12 +01:00
Daniel Barranquero fa189e7eb9 docs(openstack): add provider to introduction table (#9990) 2026-02-09 10:33:10 +01:00
Pedro Martín fb966213cc test(e2e): add e2e tests for alibabacloud provider (#9729) 2026-02-09 10:25:26 +01:00
Rubén De la Torre Vico 097a60ebc9 chore(azure): enhance metadata for monitor service (#9622)
Co-authored-by: Daniel Barranquero <danielbo2001@gmail.com>
2026-02-09 10:12:57 +01:00
Pedro Martín db03556ef6 chore(readme): update content (#9972) 2026-02-09 09:09:46 +01:00
Josema Camacho ecc8eaf366 feat(skills): create new Attack Paths queries in openCypher (#9975) 2026-02-06 11:57:33 +01:00
Alan Buscaglia 619d1ffc62 chore(ci): remove legacy E2E workflow superseded by optimized v2 (#9977) 2026-02-06 11:20:10 +01:00
Alan Buscaglia 9e20cb2e5a fix(ui): optimize scans page polling to avoid redundant API calls (#9974)
Co-authored-by: pedrooot <pedromarting3@gmail.com>
2026-02-06 10:49:15 +01:00
Prowler Bot cb76e77851 chore(api): Bump version to v1.20.0 (#9968)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-02-05 22:18:33 +01:00
Prowler Bot a24f818547 chore(release): Bump version to v5.19.0 (#9964)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-02-05 22:17:38 +01:00
Prowler Bot e07687ce67 docs: Update version to v5.18.0 (#9965)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-02-05 22:16:42 +01:00
106 changed files with 5518 additions and 750 deletions
+10 -7
@@ -14,7 +14,7 @@ ignored:
- "*.md"
- "**/*.md"
- mkdocs.yml
# Config files that don't affect runtime
- .gitignore
- .gitattributes
@@ -23,7 +23,7 @@ ignored:
- .backportrc.json
- CODEOWNERS
- LICENSE
# IDE/Editor configs
- .vscode/**
- .idea/**
@@ -31,10 +31,13 @@ ignored:
# Examples and contrib (not production code)
- examples/**
- contrib/**
# Skills (AI agent configs, not runtime)
- skills/**
# E2E setup helpers (not runnable tests)
- ui/tests/setups/**
# Permissions docs
- permissions/**
@@ -47,18 +50,18 @@ critical:
- prowler/config/**
- prowler/exceptions/**
- prowler/providers/common/**
# API Core
- api/src/backend/api/models.py
- api/src/backend/config/**
- api/src/backend/conftest.py
# UI Core
- ui/lib/**
- ui/types/**
- ui/config/**
- ui/middleware.ts
# CI/CD changes
- .github/workflows/**
- .github/test-impact.yml
+15 -4
@@ -25,7 +25,7 @@ jobs:
e2e-tests:
needs: impact-analysis
if: |
github.repository == 'prowler-cloud/prowler' &&
(needs.impact-analysis.outputs.has-ui-e2e == 'true' || needs.impact-analysis.outputs.run-all == 'true')
runs-on: ubuntu-latest
env:
@@ -65,6 +65,10 @@ jobs:
E2E_OCI_KEY_CONTENT: ${{ secrets.E2E_OCI_KEY_CONTENT }}
E2E_OCI_REGION: ${{ secrets.E2E_OCI_REGION }}
E2E_NEW_USER_PASSWORD: ${{ secrets.E2E_NEW_USER_PASSWORD }}
E2E_ALIBABACLOUD_ACCOUNT_ID: ${{ secrets.E2E_ALIBABACLOUD_ACCOUNT_ID }}
E2E_ALIBABACLOUD_ACCESS_KEY_ID: ${{ secrets.E2E_ALIBABACLOUD_ACCESS_KEY_ID }}
E2E_ALIBABACLOUD_ACCESS_KEY_SECRET: ${{ secrets.E2E_ALIBABACLOUD_ACCESS_KEY_SECRET }}
E2E_ALIBABACLOUD_ROLE_ARN: ${{ secrets.E2E_ALIBABACLOUD_ROLE_ARN }}
# Pass E2E paths from impact analysis
E2E_TEST_PATHS: ${{ needs.impact-analysis.outputs.ui-e2e }}
RUN_ALL_TESTS: ${{ needs.impact-analysis.outputs.run-all }}
@@ -200,7 +204,14 @@ jobs:
# e.g., "ui/tests/providers/**" -> "tests/providers"
TEST_PATHS="${{ env.E2E_TEST_PATHS }}"
# Remove ui/ prefix and convert ** to empty (playwright handles recursion)
TEST_PATHS=$(echo "$TEST_PATHS" | sed 's|ui/||g' | sed 's|\*\*||g' | tr ' ' '\n' | sort -u | tr '\n' ' ')
TEST_PATHS=$(echo "$TEST_PATHS" | sed 's|ui/||g' | sed 's|\*\*||g' | tr ' ' '\n' | sort -u)
# Drop auth setup helpers (not runnable test suites)
TEST_PATHS=$(echo "$TEST_PATHS" | grep -v '^tests/setups/')
if [[ -z "$TEST_PATHS" ]]; then
echo "No runnable E2E test paths after filtering setups"
exit 0
fi
TEST_PATHS=$(echo "$TEST_PATHS" | tr '\n' ' ')
echo "Resolved test paths: $TEST_PATHS"
pnpm exec playwright test $TEST_PATHS
fi
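For reference, a worked example of the normalization above, using an illustrative input rather than output from an actual run:

```bash
# Illustrative input; not taken from an actual workflow run.
TEST_PATHS="ui/tests/providers/** ui/tests/setups/**"
TEST_PATHS=$(echo "$TEST_PATHS" | sed 's|ui/||g' | sed 's|\*\*||g' | tr ' ' '\n' | sort -u)
TEST_PATHS=$(echo "$TEST_PATHS" | grep -v '^tests/setups/')
echo "$TEST_PATHS"  # -> tests/providers/
```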
@@ -222,8 +233,8 @@ jobs:
skip-e2e:
needs: impact-analysis
if: |
github.repository == 'prowler-cloud/prowler' &&
needs.impact-analysis.outputs.has-ui-e2e != 'true' &&
needs.impact-analysis.outputs.run-all != 'true'
runs-on: ubuntu-latest
steps:
-172
@@ -1,172 +0,0 @@
name: UI - E2E Tests
on:
pull_request:
branches:
- master
- "v5.*"
paths:
- '.github/workflows/ui-e2e-tests.yml'
- 'ui/**'
jobs:
e2e-tests:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
env:
AUTH_SECRET: 'fallback-ci-secret-for-testing'
AUTH_TRUST_HOST: true
NEXTAUTH_URL: 'http://localhost:3000'
NEXT_PUBLIC_API_BASE_URL: 'http://localhost:8080/api/v1'
E2E_ADMIN_USER: ${{ secrets.E2E_ADMIN_USER }}
E2E_ADMIN_PASSWORD: ${{ secrets.E2E_ADMIN_PASSWORD }}
E2E_AWS_PROVIDER_ACCOUNT_ID: ${{ secrets.E2E_AWS_PROVIDER_ACCOUNT_ID }}
E2E_AWS_PROVIDER_ACCESS_KEY: ${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}
E2E_AWS_PROVIDER_SECRET_KEY: ${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}
E2E_AWS_PROVIDER_ROLE_ARN: ${{ secrets.E2E_AWS_PROVIDER_ROLE_ARN }}
E2E_AZURE_SUBSCRIPTION_ID: ${{ secrets.E2E_AZURE_SUBSCRIPTION_ID }}
E2E_AZURE_CLIENT_ID: ${{ secrets.E2E_AZURE_CLIENT_ID }}
E2E_AZURE_SECRET_ID: ${{ secrets.E2E_AZURE_SECRET_ID }}
E2E_AZURE_TENANT_ID: ${{ secrets.E2E_AZURE_TENANT_ID }}
E2E_M365_DOMAIN_ID: ${{ secrets.E2E_M365_DOMAIN_ID }}
E2E_M365_CLIENT_ID: ${{ secrets.E2E_M365_CLIENT_ID }}
E2E_M365_SECRET_ID: ${{ secrets.E2E_M365_SECRET_ID }}
E2E_M365_TENANT_ID: ${{ secrets.E2E_M365_TENANT_ID }}
E2E_M365_CERTIFICATE_CONTENT: ${{ secrets.E2E_M365_CERTIFICATE_CONTENT }}
E2E_KUBERNETES_CONTEXT: 'kind-kind'
E2E_KUBERNETES_KUBECONFIG_PATH: /home/runner/.kube/config
E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY: ${{ secrets.E2E_GCP_BASE64_SERVICE_ACCOUNT_KEY }}
E2E_GCP_PROJECT_ID: ${{ secrets.E2E_GCP_PROJECT_ID }}
E2E_GITHUB_APP_ID: ${{ secrets.E2E_GITHUB_APP_ID }}
E2E_GITHUB_BASE64_APP_PRIVATE_KEY: ${{ secrets.E2E_GITHUB_BASE64_APP_PRIVATE_KEY }}
E2E_GITHUB_USERNAME: ${{ secrets.E2E_GITHUB_USERNAME }}
E2E_GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_PERSONAL_ACCESS_TOKEN }}
E2E_GITHUB_ORGANIZATION: ${{ secrets.E2E_GITHUB_ORGANIZATION }}
E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN: ${{ secrets.E2E_GITHUB_ORGANIZATION_ACCESS_TOKEN }}
E2E_ORGANIZATION_ID: ${{ secrets.E2E_ORGANIZATION_ID }}
E2E_OCI_TENANCY_ID: ${{ secrets.E2E_OCI_TENANCY_ID }}
E2E_OCI_USER_ID: ${{ secrets.E2E_OCI_USER_ID }}
E2E_OCI_FINGERPRINT: ${{ secrets.E2E_OCI_FINGERPRINT }}
E2E_OCI_KEY_CONTENT: ${{ secrets.E2E_OCI_KEY_CONTENT }}
E2E_OCI_REGION: ${{ secrets.E2E_OCI_REGION }}
E2E_NEW_USER_PASSWORD: ${{ secrets.E2E_NEW_USER_PASSWORD }}
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Create k8s Kind Cluster
uses: helm/kind-action@v1
with:
cluster_name: kind
- name: Modify kubeconfig
run: |
# Point the kubeconfig at the kind cluster server https://kind-control-plane:6443
# so the cluster is reachable from the worker service defined in docker-compose.yml
kubectl config set-cluster kind-kind --server=https://kind-control-plane:6443
kubectl config view
- name: Add network kind to docker compose
run: |
# Add the kind network to docker compose so services can reach the kind cluster
yq -i '.networks.kind.external = true' docker-compose.yml
# Add network kind to worker service and default network too
yq -i '.services.worker.networks = ["kind","default"]' docker-compose.yml
- name: Fix API data directory permissions
run: docker run --rm -v $(pwd)/_data/api:/data alpine chown -R 1000:1000 /data
- name: Add AWS credentials for testing AWS SDK Default Adding Provider
run: |
echo "Adding AWS credentials for testing AWS SDK Default Adding Provider..."
echo "AWS_ACCESS_KEY_ID=${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}" >> .env
echo "AWS_SECRET_ACCESS_KEY=${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}" >> .env
- name: Start API services
run: |
# Override docker-compose image tag to use latest instead of stable
# This overrides any PROWLER_API_VERSION set in .env file
export PROWLER_API_VERSION=latest
echo "Using PROWLER_API_VERSION=${PROWLER_API_VERSION}"
docker compose up -d api worker worker-beat
- name: Wait for API to be ready
run: |
echo "Waiting for prowler-api..."
timeout=150 # 2.5 minutes max
elapsed=0
while [ $elapsed -lt $timeout ]; do
if curl -s ${NEXT_PUBLIC_API_BASE_URL}/docs >/dev/null 2>&1; then
echo "Prowler API is ready!"
exit 0
fi
echo "Waiting for prowler-api... (${elapsed}s elapsed)"
sleep 5
elapsed=$((elapsed + 5))
done
echo "Timeout waiting for prowler-api to start"
exit 1
- name: Load database fixtures for E2E tests
run: |
docker compose exec -T api sh -c '
echo "Loading all fixtures from api/fixtures/dev/..."
for fixture in api/fixtures/dev/*.json; do
if [ -f "$fixture" ]; then
echo "Loading $fixture"
poetry run python manage.py loaddata "$fixture" --database admin
fi
done
echo "All database fixtures loaded successfully!"
'
- name: Setup Node.js environment
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version: '24.13.0'
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10
run_install: false
- name: Get pnpm store directory
shell: bash
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm and Next.js cache
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
with:
path: |
${{ env.STORE_PATH }}
./ui/node_modules
./ui/.next/cache
key: ${{ runner.os }}-pnpm-nextjs-${{ hashFiles('ui/pnpm-lock.yaml') }}-${{ hashFiles('ui/**/*.ts', 'ui/**/*.tsx', 'ui/**/*.js', 'ui/**/*.jsx') }}
restore-keys: |
${{ runner.os }}-pnpm-nextjs-${{ hashFiles('ui/pnpm-lock.yaml') }}-
${{ runner.os }}-pnpm-nextjs-
- name: Install UI dependencies
working-directory: ./ui
run: pnpm install --frozen-lockfile --prefer-offline
- name: Build UI application
working-directory: ./ui
run: pnpm run build
- name: Cache Playwright browsers
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
id: playwright-cache
with:
path: ~/.cache/ms-playwright
key: ${{ runner.os }}-playwright-${{ hashFiles('ui/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-playwright-
- name: Install Playwright browsers
working-directory: ./ui
if: steps.playwright-cache.outputs.cache-hit != 'true'
run: pnpm run test:e2e:install
- name: Run E2E tests
working-directory: ./ui
run: pnpm run test:e2e
- name: Upload test reports
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: failure()
with:
name: playwright-report
path: ui/playwright-report/
retention-days: 30
- name: Cleanup services
if: always()
run: |
echo "Shutting down services..."
docker compose down -v || true
echo "Cleanup completed"
+4
@@ -44,6 +44,7 @@ Use these skills for detailed patterns on-demand:
| `prowler-commit` | Professional commits (conventional-commits) | [SKILL.md](skills/prowler-commit/SKILL.md) |
| `prowler-pr` | Pull request conventions | [SKILL.md](skills/prowler-pr/SKILL.md) |
| `prowler-docs` | Documentation style guide | [SKILL.md](skills/prowler-docs/SKILL.md) |
| `prowler-attack-paths-query` | Create Attack Paths openCypher queries | [SKILL.md](skills/prowler-attack-paths-query/SKILL.md) |
| `skill-creator` | Create new AI agent skills | [SKILL.md](skills/skill-creator/SKILL.md) |
### Auto-invoke Skills
@@ -56,6 +57,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Adding DRF pagination or permissions | `django-drf` |
| Adding new providers | `prowler-provider` |
| Adding services to existing providers | `prowler-provider` |
| Adding privilege escalation detection queries | `prowler-attack-paths-query` |
| After creating/modifying a skill | `skill-sync` |
| App Router / Server Actions | `nextjs-15` |
| Building AI chat features | `ai-sdk-5` |
@@ -63,6 +65,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Create PR that requires changelog entry | `prowler-changelog` |
| Create a PR with gh pr create | `prowler-pr` |
| Creating API endpoints | `jsonapi` |
| Creating Attack Paths queries | `prowler-attack-paths-query` |
| Creating ViewSets, serializers, or filters in api/ | `django-drf` |
| Creating Zod schemas | `zod-4` |
| Creating a git commit | `prowler-commit` |
@@ -92,6 +95,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Understand changelog gate and no-changelog label behavior | `prowler-ci` |
| Understand review ownership with CODEOWNERS | `prowler-pr` |
| Update CHANGELOG.md in any component | `prowler-changelog` |
| Updating existing Attack Paths queries | `prowler-attack-paths-query` |
| Updating existing checks and metadata | `prowler-sdk-check` |
| Using Zustand stores | `zustand-5` |
| Working on MCP server tools | `prowler-mcp` |
+5 -4
@@ -104,18 +104,19 @@ Every AWS provider scan will enqueue an Attack Paths ingestion job automatically
| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) | Support | Interface |
|---|---|---|---|---|---|---|
| AWS | 584 | 84 | 40 | 17 | Official | UI, API, CLI |
| Azure | 169 | 22 | 16 | 12 | Official | UI, API, CLI |
| AWS | 585 | 84 | 40 | 17 | Official | UI, API, CLI |
| Azure | 169 | 22 | 17 | 13 | Official | UI, API, CLI |
| GCP | 100 | 17 | 14 | 7 | Official | UI, API, CLI |
| Kubernetes | 84 | 7 | 7 | 9 | Official | UI, API, CLI |
| GitHub | 20 | 2 | 1 | 2 | Official | UI, API, CLI |
| M365 | 71 | 7 | 4 | 3 | Official | UI, API, CLI |
| M365 | 72 | 7 | 4 | 4 | Official | UI, API, CLI |
| OCI | 52 | 14 | 1 | 12 | Official | UI, API, CLI |
| Alibaba Cloud | 64 | 9 | 2 | 9 | Official | UI, API, CLI |
| Cloudflare | 23 | 2 | 0 | 5 | Official | CLI |
| Cloudflare | 29 | 3 | 0 | 5 | Official | CLI |
| IaC | [See `trivy` docs.](https://trivy.dev/latest/docs/coverage/iac/) | N/A | N/A | N/A | Official | UI, API, CLI |
| MongoDB Atlas | 10 | 3 | 0 | 3 | Official | UI, API, CLI |
| LLM | [See `promptfoo` docs.](https://www.promptfoo.dev/docs/red-team/plugins/) | N/A | N/A | N/A | Official | CLI |
| OpenStack | 1 | 1 | 0 | 2 | Official | CLI |
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |
> [!Note]
+4
@@ -3,6 +3,7 @@
> **Skills Reference**: For detailed patterns, use these skills:
> - [`prowler-api`](../skills/prowler-api/SKILL.md) - Models, Serializers, Views, RLS patterns
> - [`prowler-test-api`](../skills/prowler-test-api/SKILL.md) - Testing patterns (pytest-django)
> - [`prowler-attack-paths-query`](../skills/prowler-attack-paths-query/SKILL.md) - Attack Paths openCypher queries
> - [`django-drf`](../skills/django-drf/SKILL.md) - Generic DRF patterns
> - [`jsonapi`](../skills/jsonapi/SKILL.md) - Strict JSON:API v1.1 spec compliance
> - [`pytest`](../skills/pytest/SKILL.md) - Generic pytest patterns
@@ -15,9 +16,11 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
|--------|-------|
| Add changelog entry for a PR or feature | `prowler-changelog` |
| Adding DRF pagination or permissions | `django-drf` |
| Adding privilege escalation detection queries | `prowler-attack-paths-query` |
| Committing changes | `prowler-commit` |
| Create PR that requires changelog entry | `prowler-changelog` |
| Creating API endpoints | `jsonapi` |
| Creating Attack Paths queries | `prowler-attack-paths-query` |
| Creating ViewSets, serializers, or filters in api/ | `django-drf` |
| Creating a git commit | `prowler-commit` |
| Creating/modifying models, views, serializers | `prowler-api` |
@@ -27,6 +30,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Reviewing JSON:API compliance | `jsonapi` |
| Testing RLS tenant isolation | `prowler-test-api` |
| Update CHANGELOG.md in any component | `prowler-changelog` |
| Updating existing Attack Paths queries | `prowler-attack-paths-query` |
| Writing Prowler API tests | `prowler-test-api` |
| Writing Python tests with pytest | `pytest` |
+9
@@ -2,6 +2,15 @@
All notable changes to the **Prowler API** are documented in this file.
## [1.20.0] (Prowler UNRELEASED)
### 🔄 Changed
- Attack Paths: Query definitions now include a short description and attribution [(#9983)](https://github.com/prowler-cloud/prowler/pull/9983)
- Attack Paths: Internet node is created during the scan [(#9992)](https://github.com/prowler-cloud/prowler/pull/9992)
---
## [1.19.0] (Prowler v5.18.0)
### 🚀 Added
+1 -1
@@ -49,7 +49,7 @@ name = "prowler-api"
package-mode = false
# Needed for SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.19.0"
version = "1.20.0"
[project.scripts]
celery = "src.backend.config.settings.celery"
File diff suppressed because it is too large
@@ -1,6 +1,14 @@
from dataclasses import dataclass, field
@dataclass
class AttackPathsQueryAttribution:
"""Source attribution for an Attack Path query."""
text: str
link: str
@dataclass
class AttackPathsQueryParameterDefinition:
"""
@@ -23,7 +31,9 @@ class AttackPathsQueryDefinition:
id: str
name: str
short_description: str
description: str
provider: str
cypher: str
attribution: AttackPathsQueryAttribution | None = None
parameters: list[AttackPathsQueryParameterDefinition] = field(default_factory=list)
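Based on the fields shown above, constructing a definition with attribution might look like the following sketch (all values are illustrative, not an actual bundled query):

```python
definition = AttackPathsQueryDefinition(
    id="aws-internet-exposed-ec2",  # hypothetical query id
    name="Internet-exposed EC2 instances",
    short_description="EC2 instances reachable from the Internet.",
    description="Lists EC2 instances with a CAN_ACCESS path from the Internet node.",
    provider="aws",
    cypher="MATCH (:Internet)-[:CAN_ACCESS]->(i:EC2Instance) RETURN i",
    attribution=AttackPathsQueryAttribution(
        text="Prowler Attack Paths",
        link="https://github.com/prowler-cloud/prowler",
    ),
    # parameters defaults to an empty list
)
```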
+35 -3
@@ -1,7 +1,7 @@
openapi: 3.0.3
info:
title: Prowler API
version: 1.19.0
version: 1.20.0
description: |-
Prowler API specification.
@@ -616,7 +616,7 @@ paths:
operationId: attack_paths_scans_queries_retrieve
description: Retrieve the catalog of Attack Paths queries available for this
Attack Paths scan.
summary: List attack paths queries
summary: List Attack Paths queries
parameters:
- in: query
name: fields[attack-paths-scans]
@@ -714,7 +714,7 @@ paths:
description: Bad request (e.g., Unknown Attack Paths query for the selected
provider)
'404':
description: No attack paths found for the given query and parameters
description: No Attack Paths found for the given query and parameters
'500':
description: Attack Paths query execution failed due to a database error
/api/v1/compliance-overviews:
@@ -12438,6 +12438,8 @@ components:
type: string
name:
type: string
short_description:
type: string
description:
type: string
provider:
@@ -12446,12 +12448,42 @@ components:
type: array
items:
$ref: '#/components/schemas/AttackPathsQueryParameter'
attribution:
allOf:
- $ref: '#/components/schemas/AttackPathsQueryAttribution'
nullable: true
required:
- id
- name
- short_description
- description
- provider
- parameters
AttackPathsQueryAttribution:
type: object
required:
- type
- id
additionalProperties: false
properties:
type:
type: string
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
member is used to describe resource objects that share common attributes
and relationships.
enum:
- attack-paths-query-attributions
id: {}
attributes:
type: object
properties:
text:
type: string
link:
type: string
required:
- text
- link
AttackPathsQueryParameter:
type: object
required:
@@ -83,6 +83,7 @@ def test_execute_attack_paths_query_serializes_graph(
definition = attack_paths_query_definition_factory(
id="aws-rds",
name="RDS",
short_description="Short desc",
description="",
cypher="MATCH (n) RETURN n",
parameters=[],
@@ -143,6 +144,7 @@ def test_execute_attack_paths_query_wraps_graph_errors(
definition = attack_paths_query_definition_factory(
id="aws-rds",
name="RDS",
short_description="Short desc",
description="",
cypher="MATCH (n) RETURN n",
parameters=[],
+3
@@ -3830,6 +3830,7 @@ class TestAttackPathsScanViewSet:
AttackPathsQueryDefinition(
id="aws-rds",
name="RDS inventory",
short_description="List account RDS assets.",
description="List account RDS assets",
provider=provider.provider,
cypher="MATCH (n) RETURN n",
@@ -3892,6 +3893,7 @@ class TestAttackPathsScanViewSet:
query_definition = AttackPathsQueryDefinition(
id="aws-rds",
name="RDS inventory",
short_description="List account RDS assets.",
description="List account RDS assets",
provider=provider.provider,
cypher="MATCH (n) RETURN n",
@@ -4049,6 +4051,7 @@ class TestAttackPathsScanViewSet:
query_definition = AttackPathsQueryDefinition(
id="aws-empty",
name="empty",
short_description="",
description="",
provider=provider.provider,
cypher="MATCH (n) RETURN n",
+10
@@ -1176,6 +1176,14 @@ class AttackPathsScanSerializer(RLSSerializer):
return provider.uid if provider else None
class AttackPathsQueryAttributionSerializer(BaseSerializerV1):
text = serializers.CharField()
link = serializers.CharField()
class JSONAPIMeta:
resource_name = "attack-paths-query-attributions"
class AttackPathsQueryParameterSerializer(BaseSerializerV1):
name = serializers.CharField()
label = serializers.CharField()
@@ -1190,7 +1198,9 @@ class AttackPathsQueryParameterSerializer(BaseSerializerV1):
class AttackPathsQuerySerializer(BaseSerializerV1):
id = serializers.CharField()
name = serializers.CharField()
short_description = serializers.CharField()
description = serializers.CharField()
attribution = AttackPathsQueryAttributionSerializer(allow_null=True, required=False)
provider = serializers.CharField()
parameters = AttackPathsQueryParameterSerializer(many=True)
+1 -1
@@ -392,7 +392,7 @@ class SchemaView(SpectacularAPIView):
def get(self, request, *args, **kwargs):
spectacular_settings.TITLE = "Prowler API"
spectacular_settings.VERSION = "1.19.0"
spectacular_settings.VERSION = "1.20.0"
spectacular_settings.DESCRIPTION = (
"Prowler API specification.\n\nThis file is auto-generated."
)
+1
@@ -1663,6 +1663,7 @@ def attack_paths_query_definition_factory():
definition_payload = {
"id": "aws-test",
"name": "Attack Paths Test Query",
"short_description": "Synthetic short description for tests.",
"description": "Synthetic Attack Paths definition for tests.",
"provider": "aws",
"cypher": "RETURN 1",
@@ -12,8 +12,10 @@ BATCH_SIZE = env.int("ATTACK_PATHS_BATCH_SIZE", 1000)
# Neo4j internal labels (Prowler-specific, not provider-specific)
# - `ProwlerFinding`: Label for finding nodes created by Prowler and linked to cloud resources.
# - `ProviderResource`: Added to ALL synced nodes for provider isolation and drop/query ops.
# - `Internet`: Singleton node representing external internet access for exposed-resource queries.
PROWLER_FINDING_LABEL = "ProwlerFinding"
PROVIDER_RESOURCE_LABEL = "ProviderResource"
INTERNET_NODE_LABEL = "Internet"
@dataclass(frozen=True)
@@ -6,6 +6,7 @@ from cartography.client.core.tx import run_write_query
from celery.utils.log import get_task_logger
from tasks.jobs.attack_paths.config import (
INTERNET_NODE_LABEL,
PROWLER_FINDING_LABEL,
PROVIDER_RESOURCE_LABEL,
)
@@ -30,6 +31,8 @@ FINDINGS_INDEX_STATEMENTS = [
f"CREATE INDEX prowler_finding_provider_uid IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.provider_uid);",
f"CREATE INDEX prowler_finding_lastupdated IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.lastupdated);",
f"CREATE INDEX prowler_finding_status IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.status);",
# Internet node index for MERGE lookups
f"CREATE INDEX internet_id IF NOT EXISTS FOR (n:{INTERNET_NODE_LABEL}) ON (n.id);",
]
# Indexes for provider resource sync operations
@@ -0,0 +1,67 @@
"""
Internet node enrichment for Attack Paths graph.
Creates a real Internet node and CAN_ACCESS relationships to
internet-exposed resources (EC2Instance, LoadBalancer, LoadBalancerV2)
in the temporary scan database before sync.
"""
import neo4j
from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger
from api.models import Provider
from prowler.config import config as ProwlerConfig
from tasks.jobs.attack_paths.config import get_root_node_label
from tasks.jobs.attack_paths.queries import (
CREATE_CAN_ACCESS_RELATIONSHIPS_TEMPLATE,
CREATE_INTERNET_NODE,
render_cypher_template,
)
logger = get_task_logger(__name__)
def analysis(
neo4j_session: neo4j.Session,
prowler_api_provider: Provider,
config: CartographyConfig,
) -> int:
"""
Create Internet node and CAN_ACCESS relationships to exposed resources.
Args:
neo4j_session: Active Neo4j session (temp database).
prowler_api_provider: The Prowler API provider instance.
config: Cartography configuration with update_tag.
Returns:
Number of CAN_ACCESS relationships created.
"""
provider_uid = str(prowler_api_provider.uid)
parameters = {
"provider_uid": provider_uid,
"last_updated": config.update_tag,
"prowler_version": ProwlerConfig.prowler_version,
}
logger.info(f"Creating Internet node for provider {provider_uid}")
neo4j_session.run(CREATE_INTERNET_NODE, parameters)
query = render_cypher_template(
CREATE_CAN_ACCESS_RELATIONSHIPS_TEMPLATE,
{"__ROOT_LABEL__": get_root_node_label(prowler_api_provider.provider)},
)
logger.info(
f"Creating CAN_ACCESS relationships from Internet to exposed resources for {provider_uid}"
)
result = neo4j_session.run(query, parameters)
relationships_merged = result.single().get("relationships_merged", 0)
logger.info(
f"Created {relationships_merged} CAN_ACCESS relationships for provider {provider_uid}"
)
return relationships_merged
@@ -1,5 +1,6 @@
# Cypher query templates for Attack Paths operations
from tasks.jobs.attack_paths.config import (
INTERNET_NODE_LABEL,
PROWLER_FINDING_LABEL,
PROVIDER_RESOURCE_LABEL,
)
@@ -91,6 +92,37 @@ CLEANUP_FINDINGS_TEMPLATE = f"""
RETURN COUNT(finding) AS deleted_findings_count
"""
# Internet queries (used by internet.py)
# ---------------------------------------
CREATE_INTERNET_NODE = f"""
MERGE (internet:{INTERNET_NODE_LABEL} {{id: 'Internet'}})
ON CREATE SET
internet.name = 'Internet',
internet.firstseen = timestamp(),
internet.lastupdated = $last_updated,
internet._module_name = 'cartography:prowler',
internet._module_version = $prowler_version
ON MATCH SET
internet.lastupdated = $last_updated
"""
CREATE_CAN_ACCESS_RELATIONSHIPS_TEMPLATE = f"""
MATCH (account:__ROOT_LABEL__ {{id: $provider_uid}})-->(resource)
WHERE resource.exposed_internet = true
WITH resource
MATCH (internet:{INTERNET_NODE_LABEL} {{id: 'Internet'}})
MERGE (internet)-[r:CAN_ACCESS]->(resource)
ON CREATE SET
r.firstseen = timestamp(),
r.lastupdated = $last_updated,
r._module_name = 'cartography:prowler',
r._module_version = $prowler_version
ON MATCH SET
r.lastupdated = $last_updated
RETURN COUNT(r) AS relationships_merged
"""
# Sync queries (used by sync.py)
# -------------------------------
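As a usage sketch, rendering and running the Internet templates above follows the call pattern already shown in `internet.py`; the parameter values here are illustrative, and `AWSAccount` is the root label the tests patch in for AWS providers:

```python
# Sketch following the call pattern in internet.py above; values are illustrative.
query = render_cypher_template(
    CREATE_CAN_ACCESS_RELATIONSHIPS_TEMPLATE,
    {"__ROOT_LABEL__": "AWSAccount"},
)
result = neo4j_session.run(
    query,
    {
        "provider_uid": "123456789012",  # illustrative AWS account id
        "last_updated": 1234567890,      # cartography update_tag
        "prowler_version": "5.19.0",     # illustrative Prowler version
    },
)
print(result.single()["relationships_merged"])
```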
@@ -16,7 +16,7 @@ from api.models import (
StateChoices,
)
from api.utils import initialize_prowler_provider
from tasks.jobs.attack_paths import db_utils, findings, sync, utils
from tasks.jobs.attack_paths import db_utils, findings, internet, sync, utils
from tasks.jobs.attack_paths.config import get_cartography_ingestion_function
# Without this Celery goes crazy with Cartography logging
@@ -135,7 +135,15 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
cartography_analysis.run(tmp_neo4j_session, tmp_cartography_config)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)
# Adding Prowler nodes and relationships
# Creating Internet node and CAN_ACCESS relationships
logger.info(
f"Creating Internet graph for AWS account {prowler_api_provider.uid}"
)
internet.analysis(
tmp_neo4j_session, prowler_api_provider, tmp_cartography_config
)
# Adding Prowler Finding nodes and relationships
logger.info(
f"Syncing Prowler analysis for AWS account {prowler_api_provider.uid}"
)
@@ -4,6 +4,7 @@ from unittest.mock import MagicMock, call, patch
import pytest
from tasks.jobs.attack_paths import findings as findings_module
from tasks.jobs.attack_paths import internet as internet_module
from tasks.jobs.attack_paths.scan import run as attack_paths_run
from api.models import (
@@ -37,6 +38,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
@patch("tasks.jobs.attack_paths.scan.sync.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@@ -67,6 +69,7 @@ class TestAttackPathsRun:
mock_cartography_ontology,
mock_findings_indexes,
mock_findings_analysis,
mock_internet_analysis,
mock_sync_indexes,
mock_drop_subgraph,
mock_sync,
@@ -139,6 +142,7 @@ class TestAttackPathsRun:
# These use tmp_cartography_config (neo4j_database="db-scan-id")
mock_cartography_analysis.assert_called_once()
mock_cartography_ontology.assert_called_once()
mock_internet_analysis.assert_called_once()
mock_findings_analysis.assert_called_once()
mock_drop_subgraph.assert_called_once_with(
database="tenant-db",
@@ -207,6 +211,7 @@ class TestAttackPathsRun:
patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run"),
patch("tasks.jobs.attack_paths.scan.cartography_analysis.run"),
patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes"),
patch("tasks.jobs.attack_paths.scan.internet.analysis"),
patch("tasks.jobs.attack_paths.scan.findings.analysis"),
patch(
"tasks.jobs.attack_paths.scan.db_utils.retrieve_attack_paths_scan",
@@ -757,3 +762,45 @@ class TestAttackPathsFindingsHelpers:
findings_module.load_findings(mock_session, empty_gen(), provider, config)
mock_session.run.assert_not_called()
class TestInternetAnalysis:
def _make_provider_and_config(self):
provider = MagicMock()
provider.provider = "aws"
provider.uid = "123456789012"
config = SimpleNamespace(update_tag=1234567890)
return provider, config
def test_analysis_creates_node_and_relationships(self):
"""Verify both Cypher statements are executed and relationship count returned."""
mock_session = MagicMock()
mock_result = MagicMock()
mock_result.single.return_value = {"relationships_merged": 3}
mock_session.run.side_effect = [None, mock_result]
provider, config = self._make_provider_and_config()
with patch(
"tasks.jobs.attack_paths.internet.get_root_node_label",
return_value="AWSAccount",
):
result = internet_module.analysis(mock_session, provider, config)
assert mock_session.run.call_count == 2
assert result == 3
def test_analysis_zero_exposed_resources(self):
"""When no resources are exposed, zero relationships are created."""
mock_session = MagicMock()
mock_result = MagicMock()
mock_result.single.return_value = {"relationships_merged": 0}
mock_session.run.side_effect = [None, mock_result]
provider, config = self._make_provider_and_config()
with patch(
"tasks.jobs.attack_paths.internet.get_root_node_label",
return_value="AWSAccount",
):
result = internet_module.analysis(mock_session, provider, config)
assert result == 0
+24
@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
examples
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
+12
@@ -0,0 +1,12 @@
dependencies:
- name: postgresql
repository: oci://registry-1.docker.io/bitnamicharts
version: 18.2.0
- name: valkey
repository: https://valkey.io/valkey-helm/
version: 0.9.3
- name: neo4j
repository: https://helm.neo4j.com/neo4j
version: 2025.12.1
digest: sha256:da19233c6832727345fcdb314d683d30aa347d349f270023f3a67149bffb009b
generated: "2026-01-26T12:00:06.798702+02:00"
+33
@@ -0,0 +1,33 @@
apiVersion: v2
name: prowler
description: Prowler is an Open Cloud Security tool for AWS, Azure, GCP and Kubernetes. It helps with continuous monitoring, security assessments and audits, incident response, compliance, hardening and forensics readiness.
type: application
version: 0.0.1
appVersion: "5.17.0"
home: https://prowler.com
icon: https://cdn.prod.website-files.com/68c4ec3f9fb7b154fbcb6e36/68c5e0fea5d0059b9e05834b_Link.png
keywords:
- security
- aws
- azure
- gcp
- kubernetes
maintainers:
- name: Mihai
email: mihai.legat@gmail.com
dependencies:
# https://artifacthub.io/packages/helm/bitnami/postgresql
- name: postgresql
version: 18.2.0
repository: oci://registry-1.docker.io/bitnamicharts
condition: postgresql.enabled
# https://valkey.io/valkey-helm/
- name: valkey
version: 0.9.3
repository: https://valkey.io/valkey-helm/
condition: valkey.enabled
# https://helm.neo4j.com/neo4j
- name: neo4j
version: 2025.12.1
repository: https://helm.neo4j.com/neo4j
condition: neo4j.enabled
+143
@@ -0,0 +1,143 @@
<!--
This README is the one shown on Artifact Hub.
Images should use absolute URLs.
-->
# Prowler App Helm Chart
![Version: 0.0.1](https://img.shields.io/badge/Version-0.0.1-informational?style=flat-square)
![AppVersion: 5.17.0](https://img.shields.io/badge/AppVersion-5.17.0-informational?style=flat-square)
Prowler is an Open Cloud Security tool for AWS, Azure, GCP and Kubernetes. It helps with continuous monitoring, security assessments and audits, incident response, compliance, hardening and forensics readiness. Includes CIS, NIST 800, NIST CSF, CISA, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, Well-Architected Security, ENS and more.
## Architecture
The Prowler App consists of three main components:
- **Prowler UI**: A user-friendly web interface for running Prowler and viewing results, powered by Next.js.
- **Prowler API**: The backend API that executes Prowler scans and stores the results, built with Django REST Framework.
- **Prowler SDK**: A Python SDK that integrates with the Prowler CLI for advanced functionality.
The app leverages the following supporting infrastructure (each bundled dependency can be toggled via chart values, as sketched after this list):
- **PostgreSQL**: Used for persistent storage of scan results.
- **Celery Workers**: Facilitate asynchronous execution of Prowler scans.
- **Valkey**: An in-memory database serving as a message broker for the Celery workers.
- **Neo4j**: A graph database used to store the Attack Paths graph.
- **Keda**: Kubernetes Event-driven Autoscaling, which automatically scales the number of Celery worker pods based on the workload, ensuring efficient resource utilization and responsiveness.
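Each bundled dependency is controlled by an `enabled` flag matching the `condition` fields declared in this chart's `Chart.yaml` (a minimal sketch):

```yaml
# values.yaml sketch: toggle the bundled infrastructure
postgresql:
  enabled: true
valkey:
  enabled: true
neo4j:
  enabled: true
```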
## Setup
This guide walks you through installing the Prowler App using Helm. For a minimal setup, see the [minimal installation example](./examples/minimal-installation/).
### Prerequisites
- Kubernetes cluster (1.24+)
- Helm 3.x installed
- `kubectl` configured to access your cluster
- Access to the Prowler Helm chart repository (or local chart)
### Step 1: Create Required Secrets
Before installing the Helm chart, you must create a Kubernetes Secret containing the required authentication keys and secrets.
1. **Generate the required keys and secrets:**
```bash
# Generate Django token signing key (private key)
openssl genrsa -out private.pem 2048
# Generate Django token verifying key (public key)
openssl rsa -in private.pem -pubout -out public.pem
# Generate Django secrets encryption key
openssl rand -base64 32
# Generate Auth secret
openssl rand -base64 32
```
2. **Create the secret file:**
Create a file named `secrets.yaml` with the following structure:
```yaml
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: prowler-secret
stringData:
DJANGO_TOKEN_SIGNING_KEY: |
-----BEGIN PRIVATE KEY-----
[paste your private key here]
-----END PRIVATE KEY-----
DJANGO_TOKEN_VERIFYING_KEY: |
-----BEGIN PUBLIC KEY-----
[paste your public key here]
-----END PUBLIC KEY-----
DJANGO_SECRETS_ENCRYPTION_KEY: "[paste your encryption key here]"
AUTH_SECRET: "[paste your auth secret here]"
NEO4J_PASSWORD: "[prowler-password]"
NEO4J_AUTH: "neo4j/[prowler-password]"
```
> **Note:** You can use the [example secrets file](./examples/minimal-installation/secrets.yaml) as a template, but **always replace the placeholder values with your own secure keys** before applying.
3. **Apply the secret to your cluster:**
```bash
kubectl apply -f secrets.yaml
```
### Step 2: Configure Values
Create a `values.yaml` file to customize your installation. At minimum, you need to configure the UI access method.
**Option A: Using Ingress (Recommended for production)**
```yaml
ui:
ingress:
enabled: true
hosts:
- host: prowler.example.com
paths:
- path: /
pathType: ImplementationSpecific
```
**Option B: Using authUrl (For proxy setups)**
```yaml
ui:
authUrl: prowler.example.com
```
> **Note:** See the [minimal installation example](./examples/minimal-installation/values.yaml) for a complete reference.
### Step 3: Install the Chart
Install Prowler App using Helm:
```bash
helm dependency update
helm install prowler prowler/prowler-app -f values.yaml
```
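To verify the release, you can check its status and list the pods by the instance label the chart applies (a quick sketch, assuming the release is named `prowler`):

```bash
helm status prowler
kubectl get pods -l app.kubernetes.io/instance=prowler
```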
### Using Existing PostgreSQL and Valkey Instances
By default, this chart deploys [PostgreSQL](https://artifacthub.io/packages/helm/bitnami/postgresql) via Bitnami's chart, [Neo4j](https://helm.neo4j.com/neo4j) via its official chart, and [Valkey](https://valkey.io/valkey-helm/) via the official Valkey Helm chart. **Note:** This default setup is not production-ready.
To connect to existing PostgreSQL, Neo4j and Valkey instances:
1. Create a `Secret` containing the correct database and message broker credentials
2. Reference the secret in the `api.secrets` list in [values.yaml](values.yaml) (see the sketch below)
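A minimal sketch of that configuration, assuming a pre-created Secret named `prowler-external-connections` (hypothetical) that carries the `POSTGRES_*`, `VALKEY_*`, and `NEO4J_*` variables the chart templates would otherwise set:

```yaml
# values.yaml sketch: disable the bundled services and reference your own credentials
postgresql:
  enabled: false
valkey:
  enabled: false
neo4j:
  enabled: false
api:
  secrets:
    - prowler-external-connections  # hypothetical Secret with connection variables
```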
## Contributing
Feel free to contact the maintainer of this repository for any questions or concerns. Contributions are encouraged and appreciated.
@@ -0,0 +1,46 @@
# Minimal Installation Example
This example demonstrates a minimal installation of Prowler in a Kubernetes cluster.
## Installation
To install Prowler using this example:
1. First, create the required secret:
```bash
# Edit secret.yaml and set secure values before applying
kubectl apply -f secret.yaml
```
2. Install the chart using the base values file:
```bash
# Basic installation
helm install prowler prowler/prowler-app -f values.yaml
```
## Configuration
The example contains the following configuration files:
### `secret.yaml`
Contains all required secrets for the Prowler installation. **Must be applied before installing the Helm chart**. Make sure to replace all placeholder values with secure values before applying.
### `values.yaml`
```yaml
ui:
# Note: Set `authUrl` if you run Prowler behind a proxy, or enable `ingress` otherwise.
# Example with authUrl:
# authUrl: example.prowler.com
# Example with ingress:
ingress:
enabled: true
hosts:
- host: example.prowler.com
paths:
- path: /
pathType: ImplementationSpecific
```
Make sure to adjust the hostname in the values file to match your environment before installing.
@@ -0,0 +1,58 @@
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: prowler-secret
stringData:
# openssl genrsa -out private.pem 2048
DJANGO_TOKEN_SIGNING_KEY: |
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCIro0QiLAxw7rF
GO0NgAWJfkpYE5ysMGDCbId07HUrv+/SCoRjqKVzGJVIvmNP5oByzSehPgswW9v3
3dqe2r9sCS1JyMa+XO3qfZCR0uRDcPCwZjIyr0QQLpWAymdBa8baeHsU1/3Orjcb
Vrr+lNx4HQJOiSn094iXPReW/25hYeq/SXs79V2CR87PGdoZAhb8IllAxJgdfkeB
/iWohY/1vfRTmIuMweWGXk0aKzPsBdvE/DqG4HjiNVEPh18G3vid0YTZNmm7u8vO
Cue3x9NQWGHA4QtxNtLtxlHcOEryqZ9ChO2nC+ew0Xl/v706XFNyLFicjisIKNQo
qdkaMS33AgMBAAECggEAGdJIChCYoL4mYafk2MEPyrrWFq+V0J3PGcvhB0DInfxD
tT2RZzZsE0NYqIZ3Qpf8OjPxwa9z863W74u1Cn+u3B0bti29BieONteD4VijEO6c
OecEorijth7m1Y7nVN+kkI9kSTrI0yvsczi+WOwMfpCUZ/vXtlSxNEkxVLBqzPCo
9VxAFIjgWOj2rpw8nxPedves36PUrC5ghLqrOTe1jmw/Di0++47AXG+DsTXc00sc
5+oybopm3Kimsxrqbf9s8SZf2A8NiwqcbLj8OtP2j2g4TCEgZYLD5Zmt+JN/wN4B
WsQG/Hwp4KPPm9QTHEpuuoPFP1CZWZeq8gPcV4apYQKBgQC+TuXjJCYhZqNIttTZ
z/i3hkKUEKQLkzTZnXaDzL5wHyEMVqM2E/WkilO0C9ZZwh0ENPzkp+JsHf7LEhHy
wSHOti81VzUCjN/YpCBKlOlClqSiDlOonImrobLei8xgvmA0VmGtirCXZyyzZUoV
OyPr17WpK6G/M5piX59MvKQg0QKBgQC33NBoQFD8A6FjrTopYmWfK099k9uQh9NE
bvUYsNAPunSDslmc/0PPHQC7fRX5Ime2BinXAN1PYtB/Fsu3jv/+FCUM5hVil0Dd
KBvt13+RYSCJKlhcGP1EkWoIg1F2XXBOZKJrC8VQ+Vyl2t06UcWQqy5M9J4VZaqI
fruOLU/URwKBgE55GjJfZZnASPRi78IhD94dbra/ZeWf/dr+IzCV7LEvJOGBmCtk
b5Y5s+o6N1krwetKLj3bPHJ4q+fwu5XuLZKfbTgBjcpPbL5YbzhRzx22IIzye2y7
n8k2FBvQaaY62lC6jeyRk9/am4Qd8D5w9I77k9z+MOQ20yJda8KoxsUBAoGBAIQ9
5QPmppjsf4ry0C9t30uhWhYnX7fPiYviBpVQrwVxBVan076Q9xOjd6BicohzT4bj
XfqPW546o12VZsbKqqLzmEZzwpPb2EJ5E8V4xv8ojb86Xr03GArWUB55XQE2aY1o
4kz99VitUg7UoWPN5ryL8sxU8NLRAdwU0w+K1a0HAoGAZaU7O94u9IIPZ6Ohobs2
Vjf/eV0brCKgX61b4z/YhuJdZsyTujhBZUihZwqR696kiFKuzmHx1ghE2ITvnPVN
q0iHxRZzBCnRQ+mQlS0trzphaCP0NVy3osFeAD9mJfnOnSmkU0ua4F81mkvke1eN
6nnaoAdy2lmMr96/Tye2ty4=
-----END PRIVATE KEY-----
# openssl rsa -in private.pem -pubout -out public.pem
DJANGO_TOKEN_VERIFYING_KEY: |
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiK6NEIiwMcO6xRjtDYAF
iX5KWBOcrDBgwmyHdOx1K7/v0gqEY6ilcxiVSL5jT+aAcs0noT4LMFvb993antq/
bAktScjGvlzt6n2QkdLkQ3DwsGYyMq9EEC6VgMpnQWvG2nh7FNf9zq43G1a6/pTc
eB0CTokp9PeIlz0Xlv9uYWHqv0l7O/VdgkfOzxnaGQIW/CJZQMSYHX5Hgf4lqIWP
9b30U5iLjMHlhl5NGisz7AXbxPw6huB44jVRD4dfBt74ndGE2TZpu7vLzgrnt8fT
UFhhwOELcTbS7cZR3DhK8qmfQoTtpwvnsNF5f7+9OlxTcixYnI4rCCjUKKnZGjEt
9wIDAQAB
-----END PUBLIC KEY-----
# openssl rand -base64 32
DJANGO_SECRETS_ENCRYPTION_KEY: "qYAIWnRK52aBT5YQkBoMEw08j7j3+QIPZXS6+A8Su44="
# openssl rand -base64 32
AUTH_SECRET: "CM9w3Nco2P1RdHaYmD+fmy2nJmSofusdHd4g7Z4KDG4="
# Unfortunately, the password must be duplicated in two keys: the Neo4j Helm chart expects it in NEO4J_AUTH and the application expects it in NEO4J_PASSWORD.
NEO4J_PASSWORD: "prowler-password-fake"
NEO4J_AUTH: "neo4j/prowler-password-fake"
@@ -0,0 +1,11 @@
ui:
ingress:
enabled: true
hosts:
- host: 127.0.0.1.nip.io
paths:
- path: /
pathType: ImplementationSpecific
# or use authUrl if you use prowler behind a proxy
# authUrl: 127.0.0.1.nip.io
@@ -0,0 +1,134 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "prowler.labels" -}}
helm.sh/chart: {{ include "prowler.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Django environment variables for api, worker, and worker_beat.
*/}}
{{- define "prowler.django.env" -}}
- name: DJANGO_TOKEN_SIGNING_KEY
valueFrom:
secretKeyRef:
name: {{ .Values.djangoTokenSigningKey.secretKeyRef.name }}
key: {{ .Values.djangoTokenSigningKey.secretKeyRef.key }}
- name: DJANGO_TOKEN_VERIFYING_KEY
valueFrom:
secretKeyRef:
name: {{ .Values.djangoTokenVerifyingKey.secretKeyRef.name }}
key: {{ .Values.djangoTokenVerifyingKey.secretKeyRef.key }}
- name: DJANGO_SECRETS_ENCRYPTION_KEY
valueFrom:
secretKeyRef:
name: {{ .Values.djangoSecretsEncryptionKey.secretKeyRef.name }}
key: {{ .Values.djangoSecretsEncryptionKey.secretKeyRef.key }}
{{- end }}
{{/*
PostgreSQL environment variables for api, worker, and worker_beat.
Outputs nothing when postgresql.enabled is false.
*/}}
{{- define "prowler.postgresql.env" -}}
{{- if .Values.postgresql.enabled }}
{{- if .Values.postgresql.auth.username }}
- name: POSTGRES_USER
value: {{ .Values.postgresql.auth.username | quote }}
{{- end }}
- name: POSTGRES_PASSWORD
{{- if .Values.postgresql.auth.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ .Values.postgresql.auth.existingSecret }}
key: {{ required "postgresql.auth.secretKeys.userPasswordKey is required when using an existing secret" .Values.postgresql.auth.secretKeys.userPasswordKey }}
{{- else if .Values.postgresql.auth.password }}
value: {{ .Values.postgresql.auth.password | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgresql
key: password
{{- end }}
- name: POSTGRES_DB
value: {{ .Values.postgresql.auth.database | quote }}
- name: POSTGRES_HOST
value: {{ .Release.Name }}-postgresql
- name: POSTGRES_PORT
value: "5432"
- name: POSTGRES_ADMIN_USER
value: postgres
- name: POSTGRES_ADMIN_PASSWORD
{{- if .Values.postgresql.auth.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ .Values.postgresql.auth.existingSecret }}
key: {{ required "postgresql.auth.secretKeys.adminPasswordKey is required when using an existing secret" .Values.postgresql.auth.secretKeys.adminPasswordKey }}
{{- else if .Values.postgresql.auth.postgresPassword }}
value: {{ .Values.postgresql.auth.postgresPassword | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgresql
key: postgres-password
{{- end }}
{{- end }}
{{- end }}
{{/*
Neo4j environment variables for api, worker, and worker_beat.
Outputs nothing when neo4j.enabled is false.
*/}}
{{- define "prowler.neo4j.env" -}}
{{- if .Values.neo4j.enabled }}
- name: NEO4J_HOST
value: {{ .Release.Name }}
- name: NEO4J_PORT
value: "7687"
- name: NEO4J_USER
value: "neo4j"
- name: NEO4J_PASSWORD
valueFrom:
secretKeyRef:
name: {{ required "neo4j.neo4j.passwordFromSecret is required" .Values.neo4j.neo4j.passwordFromSecret }}
key: NEO4J_PASSWORD
{{- end }}
{{- end }}
@@ -0,0 +1,10 @@
{{/*
Create the name of the service account to use
*/}}
{{- define "prowler.api.serviceAccountName" -}}
{{- if .Values.api.serviceAccount.create }}
{{- default (printf "%s-%s" (include "prowler.fullname" .) "api") .Values.api.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.api.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,10 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: {{ include "prowler.fullname" . }}-api
labels:
{{- include "prowler.labels" . | nindent 4 }}
data:
{{- range $key, $value := .Values.api.djangoConfig }}
{{ $key }}: {{ $value | quote }}
{{- end }}
@@ -0,0 +1,105 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prowler.fullname" . }}-api
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
{{- if not .Values.api.autoscaling.enabled }}
replicas: {{ .Values.api.replicaCount }}
{{- end }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-api
template:
metadata:
annotations:
secret-hash: "{{ printf "%s%s%s" (.Files.Get "templates/api/configmap.yaml" | sha256sum) (.Files.Get "templates/api/secret-valkey.yaml" | sha256sum) | sha256sum }}"
{{- with .Values.api.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prowler.labels" . | nindent 8 }}
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-api
{{- with .Values.api.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.api.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prowler.api.serviceAccountName" . }}
{{- with .Values.api.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: api
{{- with .Values.api.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ .Values.api.image.repository }}:{{ .Values.api.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.api.image.pullPolicy }}
{{- with .Values.api.command }}
command:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.api.args }}
args:
{{- toYaml . | nindent 12 }}
{{- end }}
ports:
- name: http
containerPort: {{ .Values.api.service.port }}
protocol: TCP
envFrom:
- configMapRef:
name: {{ include "prowler.fullname" . }}-api
{{- if .Values.valkey.enabled }}
- secretRef:
name: {{ include "prowler.fullname" . }}-api-valkey
{{- end }}
{{- with .Values.api.secrets }}
{{- range $index, $secret := . }}
- secretRef:
name: {{ $secret }}
{{- end }}
{{- end }}
env:
{{- include "prowler.django.env" . | nindent 12 }}
{{- include "prowler.postgresql.env" . | nindent 12 }}
{{- include "prowler.neo4j.env" . | nindent 12 }}
{{- with .Values.api.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.api.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.api.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.api.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.api.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.api.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.api.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.api.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -0,0 +1,32 @@
{{- if .Values.api.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "prowler.fullname" . }}-api
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "prowler.fullname" . }}-api
minReplicas: {{ .Values.api.autoscaling.minReplicas }}
maxReplicas: {{ .Values.api.autoscaling.maxReplicas }}
metrics:
{{- if .Values.api.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.api.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.api.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.api.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}
@@ -0,0 +1,43 @@
{{- if .Values.api.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "prowler.fullname" . }}-api
labels:
{{- include "prowler.labels" . | nindent 4 }}
{{- with .Values.api.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.api.ingress.className }}
ingressClassName: {{ . }}
{{- end }}
{{- if .Values.api.ingress.tls }}
tls:
{{- range .Values.api.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.api.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- with .pathType }}
pathType: {{ . }}
{{- end }}
backend:
service:
name: {{ include "prowler.fullname" $ }}-api
port:
number: {{ $.Values.api.service.port }}
{{- end }}
{{- end }}
{{- end }}
@@ -0,0 +1,29 @@
# https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/prowler-app/#step-44-kubernetes-credentials
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "prowler.fullname" . }}-api
labels:
{{- include "prowler.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["pods", "configmaps", "nodes", "namespaces"]
verbs: ["get", "list", "watch"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings", "rolebindings", "clusterroles", "roles"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "prowler.fullname" . }}-api
labels:
{{- include "prowler.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "prowler.fullname" . }}-api
subjects:
- kind: ServiceAccount
name: {{ include "prowler.api.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
@@ -0,0 +1,13 @@
{{- if .Values.valkey.enabled -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "prowler.fullname" . }}-api-valkey
labels:
{{- include "prowler.labels" . | nindent 4 }}
type: Opaque
stringData:
VALKEY_HOST: "{{ include "prowler.fullname" . }}-valkey"
VALKEY_PORT: "6379"
VALKEY_DB: "0"
{{- end -}}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "prowler.fullname" . }}-api
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
type: {{ .Values.api.service.type }}
ports:
- port: {{ .Values.api.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-api
@@ -0,0 +1,13 @@
{{- if .Values.api.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prowler.api.serviceAccountName" . }}
labels:
{{- include "prowler.labels" . | nindent 4 }}
{{- with .Values.api.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.api.serviceAccount.automount }}
{{- end }}
@@ -0,0 +1,10 @@
{{/*
Create the name of the service account to use
*/}}
{{- define "prowler.ui.serviceAccountName" -}}
{{- if .Values.ui.serviceAccount.create }}
{{- default (printf "%s-%s" (include "prowler.fullname" .) "ui") .Values.ui.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.ui.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,18 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: {{ include "prowler.fullname" . }}-ui
data:
PROWLER_UI_VERSION: "stable"
{{- if .Values.ui.ingress.enabled }}
{{- with (first .Values.ui.ingress.hosts) }}
AUTH_URL: "https://{{ .host }}"
{{- end }}
{{- else }}
AUTH_URL: {{ .Values.ui.authUrl | quote }}
{{- end }}
API_BASE_URL: "http://{{ include "prowler.fullname" . }}-api:{{ .Values.api.service.port }}/api/v1"
NEXT_PUBLIC_API_BASE_URL: "http://{{ include "prowler.fullname" . }}-api:{{ .Values.api.service.port }}/api/v1"
NEXT_PUBLIC_API_DOCS_URL: "http://{{ include "prowler.fullname" . }}-api:{{ .Values.api.service.port }}/api/v1/docs"
AUTH_TRUST_HOST: "true"
UI_PORT: {{ .Values.ui.service.port | quote }}
@@ -0,0 +1,95 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prowler.fullname" . }}-ui
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
{{- if not .Values.ui.autoscaling.enabled }}
replicas: {{ .Values.ui.replicaCount }}
{{- end }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-ui
template:
metadata:
annotations:
secret-hash: {{ .Files.Get "templates/ui/configmap.yaml" | sha256sum }}
{{- with .Values.ui.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prowler.labels" . | nindent 8 }}
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-ui
{{- with .Values.ui.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.ui.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prowler.ui.serviceAccountName" . }}
{{- with .Values.ui.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: ui
{{- with .Values.ui.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ .Values.ui.image.repository }}:{{ .Values.ui.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.ui.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.ui.service.port }}
protocol: TCP
env:
- name: AUTH_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.ui.authSecret.secretKeyRef.name }}
key: {{ .Values.ui.authSecret.secretKeyRef.key }}
envFrom:
- configMapRef:
name: {{ include "prowler.fullname" . }}-ui
{{- with .Values.ui.secrets }}
{{- range $index, $secret := . }}
- secretRef:
name: {{ $secret }}
{{- end }}
{{- end }}
{{- with .Values.ui.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.ui.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.ui.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.ui.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.ui.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.ui.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.ui.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.ui.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
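The `AUTH_SECRET` reference above defaults to a Secret named `prowler-secret` with an `AUTH_SECRET` key (see the chart's values file below, which points its Django key references at the same Secret). A hedged sketch of pre-creating it, where every value is a placeholder and the key names come from the values-file defaults:
```bash
# Hypothetical: pre-create the Secret the chart's defaults point at.
# Key names come from the values file defaults; values are placeholders.
kubectl create secret generic prowler-secret \
  --from-literal=AUTH_SECRET="<random string>" \
  --from-literal=DJANGO_TOKEN_SIGNING_KEY="<signing key>" \
  --from-literal=DJANGO_TOKEN_VERIFYING_KEY="<verifying key>" \
  --from-literal=DJANGO_SECRETS_ENCRYPTION_KEY="<encryption key>" \
  --from-literal=NEO4J_PASSWORD="<password>"
```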
@@ -0,0 +1,32 @@
{{- if .Values.ui.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "prowler.fullname" . }}-ui
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "prowler.fullname" . }}-ui
minReplicas: {{ .Values.ui.autoscaling.minReplicas }}
maxReplicas: {{ .Values.ui.autoscaling.maxReplicas }}
metrics:
{{- if .Values.ui.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.ui.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.ui.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.ui.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}
@@ -0,0 +1,43 @@
{{- if .Values.ui.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "prowler.fullname" . }}-ui
labels:
{{- include "prowler.labels" . | nindent 4 }}
{{- with .Values.ui.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.ui.ingress.className }}
ingressClassName: {{ . }}
{{- end }}
{{- if .Values.ui.ingress.tls }}
tls:
{{- range .Values.ui.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ui.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- with .pathType }}
pathType: {{ . }}
{{- end }}
backend:
service:
name: {{ include "prowler.fullname" $ }}-ui
port:
number: {{ $.Values.ui.service.port }}
{{- end }}
{{- end }}
{{- end }}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "prowler.fullname" . }}-ui
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
type: {{ .Values.ui.service.type }}
ports:
- port: {{ .Values.ui.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-ui
@@ -0,0 +1,13 @@
{{- if .Values.ui.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prowler.ui.serviceAccountName" . }}
labels:
{{- include "prowler.labels" . | nindent 4 }}
{{- with .Values.ui.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.ui.serviceAccount.automount }}
{{- end }}
@@ -0,0 +1,10 @@
{{/*
Create the name of the service account to use
*/}}
{{- define "prowler.worker.serviceAccountName" -}}
{{- if .Values.worker.serviceAccount.create }}
{{- default (printf "%s-%s" (include "prowler.fullname" .) "worker") .Values.worker.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.worker.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,101 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prowler.fullname" . }}-worker
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
{{- if not .Values.worker.autoscaling.enabled }}
replicas: {{ .Values.worker.replicaCount }}
{{- end }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-worker
template:
metadata:
annotations:
secret-hash: "{{ printf "%s%s%s" (.Files.Get "templates/api/configmap.yaml" | sha256sum) (.Files.Get "templates/api/secret-valkey.yaml" | sha256sum) | sha256sum }}"
{{- with .Values.worker.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prowler.labels" . | nindent 8 }}
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-worker
{{- with .Values.worker.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.worker.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prowler.worker.serviceAccountName" . }}
{{- with .Values.worker.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: worker
{{- with .Values.worker.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ .Values.worker.image.repository }}:{{ .Values.worker.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.worker.image.pullPolicy }}
{{- with .Values.worker.command }}
command:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker.args }}
args:
{{- toYaml . | nindent 12 }}
{{- end }}
envFrom:
- configMapRef:
name: {{ include "prowler.fullname" . }}-api
{{- if .Values.valkey.enabled }}
- secretRef:
name: {{ include "prowler.fullname" . }}-api-valkey
{{- end }}
{{- with .Values.api.secrets }}
{{- range $index, $secret := . }}
- secretRef:
name: {{ $secret }}
{{- end }}
{{- end }}
env:
{{- include "prowler.django.env" . | nindent 12 }}
{{- include "prowler.postgresql.env" . | nindent 12 }}
{{- include "prowler.neo4j.env" . | nindent 12 }}
{{- with .Values.worker.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -0,0 +1,32 @@
{{- if .Values.worker.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "prowler.fullname" . }}-worker
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "prowler.fullname" . }}-worker
minReplicas: {{ .Values.worker.autoscaling.minReplicas }}
maxReplicas: {{ .Values.worker.autoscaling.maxReplicas }}
metrics:
{{- if .Values.worker.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.worker.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.worker.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.worker.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}
@@ -0,0 +1,32 @@
{{- if .Values.worker.keda.enabled }}
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
name: {{ include "prowler.fullname" . }}-worker
namespace: {{ $.Release.Namespace }}
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
scaleTargetRef:
name: {{ include "prowler.fullname" . }}-worker
envSourceContainerName: worker
kind: Deployment
minReplicaCount: {{ .Values.worker.keda.minReplicas }}
maxReplicaCount: {{ .Values.worker.keda.maxReplicas }}
pollingInterval: {{ .Values.worker.keda.pollingInterval }}
cooldownPeriod: {{ .Values.worker.keda.cooldownPeriod }}
triggers:
- type: {{ .Values.worker.keda.triggerType }}
metadata:
userName: "postgres"
passwordFromEnv: POSTGRES_ADMIN_PASSWORD
host: {{ .Release.Name }}-postgresql
port: {{ .Values.postgresql.port | quote }}
dbName: {{ .Values.postgresql.auth.database | quote }}
sslmode: disable
# Query for KEDA to count the number of scans that are in executing, available, or scheduled states,
# where the scheduled time is within the last 2 hours and is before NOW(). Used for scaling workers.
query: >-
SELECT COUNT(*) FROM scans WHERE ((state='executing' OR state='available' OR state='scheduled') and scheduled_at < NOW() and scheduled_at > NOW() - INTERVAL '2 hours')
targetQueryValue: "1"
{{- end }}
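For context, a minimal sketch of a values override that enables this ScaledObject. The file name and replica bounds are illustrative; the keys mirror the `worker.keda` block in the chart's values file, and the bundled PostgreSQL is assumed so the trigger's host and password resolve:
```yaml
# values-keda.yaml (hypothetical): let KEDA manage worker replicas.
# Keep worker.autoscaling.enabled false so the plain HPA does not
# compete with the HPA that KEDA creates for the same Deployment.
worker:
  autoscaling:
    enabled: false
  keda:
    enabled: true
    minReplicas: 1
    maxReplicas: 5        # illustrative upper bound
    pollingInterval: 30   # seconds between pending-scan queries
    cooldownPeriod: 120   # seconds before scaling back down
    triggerType: "postgresql"
```
Applied with something like `helm upgrade --install prowler <chart path> -f values-keda.yaml`.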
@@ -0,0 +1,13 @@
{{- if .Values.worker.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prowler.worker.serviceAccountName" . }}
labels:
{{- include "prowler.labels" . | nindent 4 }}
{{- with .Values.worker.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.worker.serviceAccount.automount }}
{{- end }}
@@ -0,0 +1,10 @@
{{/*
Create the name of the service account to use
*/}}
{{- define "prowler.worker_beat.serviceAccountName" -}}
{{- if .Values.worker_beat.serviceAccount.create }}
{{- default (printf "%s-%s" (include "prowler.fullname" .) "worker-beat") .Values.worker_beat.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.worker_beat.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,99 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prowler.fullname" . }}-worker-beat
labels:
{{- include "prowler.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.worker_beat.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-worker-beat
template:
metadata:
annotations:
secret-hash: "{{ printf "%s%s%s" (.Files.Get "templates/api/configmap.yaml" | sha256sum) (.Files.Get "templates/api/secret-valkey.yaml" | sha256sum) | sha256sum }}"
{{- with .Values.worker_beat.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prowler.labels" . | nindent 8 }}
app.kubernetes.io/name: {{ include "prowler.fullname" . }}-worker-beat
{{- with .Values.worker_beat.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.worker_beat.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prowler.worker_beat.serviceAccountName" . }}
{{- with .Values.worker_beat.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: worker-beat
{{- with .Values.worker_beat.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
image: "{{ .Values.worker_beat.image.repository }}:{{ .Values.worker_beat.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.worker_beat.image.pullPolicy }}
{{- with .Values.worker_beat.command }}
command:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker_beat.args }}
args:
{{- toYaml . | nindent 12 }}
{{- end }}
envFrom:
- configMapRef:
name: {{ include "prowler.fullname" . }}-api
{{- if .Values.valkey.enabled }}
- secretRef:
name: {{ include "prowler.fullname" . }}-api-valkey
{{- end }}
{{- with .Values.api.secrets }}
{{- range $index, $secret := . }}
- secretRef:
name: {{ $secret }}
{{- end }}
{{- end }}
env:
{{- include "prowler.django.env" . | nindent 12 }}
{{- include "prowler.postgresql.env" . | nindent 12 }}
{{- include "prowler.neo4j.env" . | nindent 12 }}
{{- with .Values.worker_beat.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker_beat.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker_beat.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker_beat.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.worker_beat.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker_beat.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker_beat.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker_beat.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -0,0 +1,13 @@
{{- if .Values.worker_beat.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prowler.worker_beat.serviceAccountName" . }}
labels:
{{- include "prowler.labels" . | nindent 4 }}
{{- with .Values.worker_beat.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.worker_beat.serviceAccount.automount }}
{{- end }}
+566
View File
@@ -0,0 +1,566 @@
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""
# References to the secrets containing the Django token signing/verifying and secrets-encryption keys.
# Used to inject the corresponding environment variables into the API container.
djangoTokenSigningKey:
secretKeyRef:
name: prowler-secret
key: DJANGO_TOKEN_SIGNING_KEY
djangoTokenVerifyingKey:
secretKeyRef:
name: prowler-secret
key: DJANGO_TOKEN_VERIFYING_KEY
djangoSecretsEncryptionKey:
secretKeyRef:
name: prowler-secret
key: DJANGO_SECRETS_ENCRYPTION_KEY
ui:
# This sets the replica count. More information: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: prowlercloud/prowler-ui
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
# Secrets for pulling an image from a private repository. More information: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# Reference to the secret containing the UI authentication secret.
# Used to inject the environment variable for the UI container.
# By default, expects a Secret named 'prowler-secret' with a key 'AUTH_SECRET'.
authSecret:
secretKeyRef:
name: prowler-secret
key: AUTH_SECRET
# Secret names to be used as env vars.
secrets: []
# - "prowler-ui-secret"
# This section builds out the service account. More information: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This sets Kubernetes annotations on the Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This sets Kubernetes labels on the Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This sets up a service. More information: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
# This sets the ports. More information: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 3000
# The URL of the UI. Only used if ingress is disabled.
authUrl: ""
# This block sets up the ingress. More information: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This sets up the liveness and readiness probes. More information: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
# This section sets up autoscaling. More information: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
api:
# This sets the replica count. More information: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: prowlercloud/prowler-api
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
# Shared with celery-worker and celery-beat
djangoConfig:
# API scan settings
# The path to the directory where scan output should be stored
DJANGO_TMP_OUTPUT_DIRECTORY: "/tmp/prowler_api_output"
# The maximum number of findings to process in a single batch
DJANGO_FINDINGS_BATCH_SIZE: "1000"
# Django settings
DJANGO_ALLOWED_HOSTS: "*"
DJANGO_BIND_ADDRESS: "0.0.0.0"
DJANGO_PORT: "8080"
DJANGO_DEBUG: "False"
DJANGO_SETTINGS_MODULE: "config.django.production"
# Select one of [ndjson|human_readable]
DJANGO_LOGGING_FORMATTER: "ndjson"
# Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
# Applies to both Django and Celery Workers
DJANGO_LOGGING_LEVEL: "INFO"
# Defaults to the maximum available based on CPU cores if not set.
DJANGO_WORKERS: "4"
# Token lifetime is in minutes
DJANGO_ACCESS_TOKEN_LIFETIME: "30"
# Token lifetime is in minutes
DJANGO_REFRESH_TOKEN_LIFETIME: "1440"
DJANGO_CACHE_MAX_AGE: "3600"
DJANGO_STALE_WHILE_REVALIDATE: "60"
DJANGO_MANAGE_DB_PARTITIONS: "True"
DJANGO_BROKER_VISIBILITY_TIMEOUT: "86400"
# Secret names to be used as env vars for api, worker, and worker_beat.
secrets: []
# - "prowler-api-keys"
command:
- /home/prowler/docker-entrypoint.sh
args:
- prod
# Secrets for pulling an image from a private repository. More information: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This section builds out the service account. More information: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This sets Kubernetes annotations on the Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This sets Kubernetes labels on the Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This sets up a service. More information: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
# This sets the ports. More information: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 8080
# This block sets up the ingress. More information: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# 3m30s to set up the DB
# startupProbe:
# httpGet:
# path: /api/v1/docs
# port: http
# This sets up the liveness and readiness probes. More information: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
failureThreshold: 10
httpGet:
path: /api/v1/docs
port: http
periodSeconds: 20
readinessProbe:
failureThreshold: 10
httpGet:
path: /api/v1/docs
port: http
periodSeconds: 20
# This section sets up autoscaling. More information: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
worker:
# This sets the replica count. More information: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: prowlercloud/prowler-api
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
command:
- /home/prowler/docker-entrypoint.sh
args:
- worker
# Secrets for pulling an image from a private repository. More information: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This section builds out the service account. More information: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This sets Kubernetes annotations on the Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This sets Kubernetes labels on the Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This sets up the liveness and readiness probes. More information: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe: {}
readinessProbe: {}
# This section sets up autoscaling. More information: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 10
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
# KEDA ScaledObject configuration
keda:
# -- Set to `true` to enable KEDA for the worker pods
# Note: do not enable KEDA and the HPA at the same time; both would manage the worker replica count and conflict.
enabled: false
# -- The minimum number of replicas to use for the worker pods
minReplicas: 1
# -- The maximum number of replicas to use for the worker pods
maxReplicas: 2
# -- The polling interval in seconds for checking metrics
pollingInterval: 30
# -- The cooldown period in seconds for scaling
cooldownPeriod: 120
# -- The KEDA trigger type (the ScaledObject template above defines a postgresql trigger)
triggerType: "postgresql"
# -- The target value for the scaler (note: the ScaledObject template currently hardcodes targetQueryValue: "1")
value: "50"
worker_beat:
# This sets the replica count. More information: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: prowlercloud/prowler-api
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
command:
- ../docker-entrypoint.sh
args:
- beat
# Secrets for pulling an image from a private repository. More information: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This section builds out the service account. More information: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This sets Kubernetes annotations on the Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This sets Kubernetes labels on the Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This sets up the liveness and readiness probes. More information: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe: {}
readinessProbe: {}
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
postgresql:
# -- Enable PostgreSQL deployment (via the Bitnami Helm chart). To use an external Postgres server (or a managed one), set this to false; a sketch of the required Secret follows this file.
# If enabled, it will create a Secret with the credentials.
# Otherwise, create a secret with the following and add it to the api deployment:
# - POSTGRES_HOST
# - POSTGRES_PORT
# - POSTGRES_ADMIN_USER - Existing user in charge of migrations, tables, permissions, RLS
# - POSTGRES_ADMIN_PASSWORD
# - POSTGRES_USER - Will be created by ADMIN_USER
# - POSTGRES_PASSWORD
# - POSTGRES_DB - Existing DB
enabled: true
image:
repository: "bitnami/postgresql"
auth:
database: prowler_db
username: prowler
valkey:
# If enabled, the chart creates a Secret with the Valkey connection details.
# Otherwise, create a secret with the following keys:
# - VALKEY_HOST
# - VALKEY_PORT
# - VALKEY_DB
enabled: true
neo4j:
enabled: true
neo4j:
name: prowler-neo4j
edition: community
# The name of the secret containing the Neo4j password with the key NEO4J_PASSWORD
passwordFromSecret: prowler-secret
# Disable lookups during helm template rendering (required for ArgoCD)
disableLookups: true
volumes:
data:
mode: defaultStorageClass
services:
neo4j:
enabled: false
# Neo4j Configuration (yaml format)
config:
dbms_security_procedures_allowlist: "apoc.*"
dbms_security_procedures_unrestricted: "apoc.*"
apoc_config:
apoc.export.file.enabled: "true"
apoc.import.file.enabled: "true"
apoc.import.file.use_neo4j_config: "true"
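To make the external-database path above concrete, here is a hedged sketch of the Secret the values comments ask for when `postgresql.enabled` is false; the key names come from the comment block, everything else is a placeholder:
```yaml
# Hypothetical Secret for an external Postgres server (postgresql.enabled: false).
apiVersion: v1
kind: Secret
metadata:
  name: prowler-external-postgres   # placeholder name
type: Opaque
stringData:
  POSTGRES_HOST: "postgres.example.internal"
  POSTGRES_PORT: "5432"
  POSTGRES_ADMIN_USER: "postgres"       # existing user in charge of migrations
  POSTGRES_ADMIN_PASSWORD: "<admin password>"
  POSTGRES_USER: "prowler"              # will be created by the admin user
  POSTGRES_PASSWORD: "<password>"
  POSTGRES_DB: "prowler_db"             # existing database
```
Listing the Secret's name under `api.secrets` injects it through the deployments' `envFrom`, which is how the api, worker, and worker-beat templates consume extra secrets.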
@@ -115,8 +115,8 @@ To update the environment file:
Edit the `.env` file and change version values:
```env
PROWLER_UI_VERSION="5.17.0"
PROWLER_API_VERSION="5.17.0"
PROWLER_UI_VERSION="5.18.0"
PROWLER_API_VERSION="5.18.0"
```
<Note>
+1
View File
@@ -36,6 +36,7 @@ The supported providers right now are:
| [Cloudflare](/user-guide/providers/cloudflare/getting-started-cloudflare) | Official | Accounts | CLI |
| [Infra as Code](/user-guide/providers/iac/getting-started-iac) | Official | Repositories | UI, API, CLI |
| [MongoDB Atlas](/user-guide/providers/mongodbatlas/getting-started-mongodbatlas) | Official | Organizations | UI, API, CLI |
| [OpenStack](/user-guide/providers/openstack/getting-started-openstack) | Official | Projects | CLI |
| [LLM](/user-guide/providers/llm/getting-started-llm) | Official | Models | CLI |
| **NHN** | Unofficial | Tenants | CLI |
+13 -1
View File
@@ -2,6 +2,18 @@
All notable changes to the **Prowler SDK** are documented in this file.
## [5.19.0] (Prowler UNRELEASED)
### 🚀 Added
- AI Skills: Added a skill for creating new Attack Paths queries in openCypher, compatible with Neo4j and Neptune [(#9975)](https://github.com/prowler-cloud/prowler/pull/9975)
### 🔄 Changed
- Update Azure Monitor service metadata to new format [(#9622)](https://github.com/prowler-cloud/prowler/pull/9622)
---
## [5.18.0] (Prowler v5.18.0)
### 🚀 Added
@@ -36,7 +48,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Update Azure Network service metadata to new format [(#9624)](https://github.com/prowler-cloud/prowler/pull/9624)
- Update Azure Storage service metadata to new format [(#9628)](https://github.com/prowler-cloud/prowler/pull/9628)
### 🐛 Fixed
### 🐞 Fixed
- Duplicated findings in `entra_user_with_vm_access_has_mfa` check when user has multiple VM access roles [(#9914)](https://github.com/prowler-cloud/prowler/pull/9914)
- Jira integration failing with `INVALID_INPUT` error when sending findings with long resource UIDs exceeding 255-character summary limit [(#9926)](https://github.com/prowler-cloud/prowler/pull/9926)
+51
View File
@@ -1144,6 +1144,57 @@ def prowler():
f"{Style.BRIGHT}{Fore.GREEN}\n{findings_archived_in_security_hub} findings archived in AWS Security Hub!{Style.RESET_ALL}"
)
# Elasticsearch Integration (all providers)
if args.elasticsearch:
from prowler.lib.integrations.elasticsearch.elasticsearch import (
Elasticsearch as ElasticsearchIntegration,
)
print(
f"{Style.BRIGHT}\nSending findings to Elasticsearch, please wait...{Style.RESET_ALL}"
)
# Get OCSF data - reuse if already generated, otherwise create
ocsf_data = None
for output in generated_outputs.get("regular", []):
if isinstance(output, OCSF):
ocsf_data = output.data
break
if ocsf_data is None:
# Generate OCSF output without writing to file
ocsf_output = OCSF(findings=finding_outputs, file_path=None)
ocsf_output.transform(finding_outputs)
ocsf_data = ocsf_output.data
elasticsearch = ElasticsearchIntegration(
url=output_options.elasticsearch_url,
index=output_options.elasticsearch_index,
api_key=output_options.elasticsearch_api_key,
username=output_options.elasticsearch_username,
password=output_options.elasticsearch_password,
skip_tls_verify=output_options.elasticsearch_skip_tls_verify,
findings=[f.dict(exclude_none=True) for f in ocsf_data],
send_only_fails=output_options.send_es_only_fails,
)
connection = elasticsearch.test_connection()
if not connection.connected:
print(
f"{Style.BRIGHT}{Fore.RED}\nElasticsearch connection failed: {connection.error_message}{Style.RESET_ALL}"
)
else:
elasticsearch.create_index_if_not_exists()
findings_sent = elasticsearch.batch_send_to_elasticsearch()
if findings_sent == 0:
print(
f"{Style.BRIGHT}{orange_color}\nNo findings sent to Elasticsearch.{Style.RESET_ALL}"
)
else:
print(
f"{Style.BRIGHT}{Fore.GREEN}\n{findings_sent} findings sent to Elasticsearch index '{output_options.elasticsearch_index}'!{Style.RESET_ALL}"
)
# Display summary table
if not args.only_logs:
display_summary_table(
+1 -1
View File
@@ -38,7 +38,7 @@ class _MutableTimestamp:
timestamp = _MutableTimestamp(datetime.today())
timestamp_utc = _MutableTimestamp(datetime.now(timezone.utc))
prowler_version = "5.18.0"
prowler_version = "5.19.0"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://raw.githubusercontent.com/prowler-cloud/prowler/dc7d2d5aeb92fdf12e8604f42ef6472cd3e8e889/docs/img/prowler-logo-black.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
+64
View File
@@ -16,6 +16,7 @@ from prowler.lib.outputs.common import Status
from prowler.providers.common.arguments import (
init_providers_parser,
validate_asff_usage,
validate_elasticsearch_arguments,
validate_provider_arguments,
)
@@ -79,6 +80,7 @@ Detailed documentation at https://docs.prowler.com
self.__init_config_parser__()
self.__init_custom_checks_metadata_parser__()
self.__init_third_party_integrations_parser__()
self.__init_elasticsearch_parser__()
# Init Providers Arguments
init_providers_parser(self)
@@ -145,6 +147,11 @@ Detailed documentation at https://docs.prowler.com
if not asff_is_valid:
self.parser.error(asff_error)
# Validate Elasticsearch arguments
es_is_valid, es_error = validate_elasticsearch_arguments(args)
if not es_is_valid:
self.parser.error(es_error)
return args
def __set_default_provider__(self, args: list) -> list:
@@ -414,3 +421,60 @@ Detailed documentation at https://docs.prowler.com
action="store_true",
help="Send a summary of the execution with a Slack APP in your channel. Environment variables SLACK_API_TOKEN and SLACK_CHANNEL_NAME are required (see more in https://docs.prowler.com/user-guide/cli/tutorials/integrations#configuration-of-the-integration-with-slack/).",
)
def __init_elasticsearch_parser__(self):
"""Init the Elasticsearch integration CLI parser"""
elasticsearch_parser = self.common_providers_parser.add_argument_group(
"Elasticsearch Integration"
)
elasticsearch_parser.add_argument(
"--elasticsearch",
"-E",
action="store_true",
help="Send findings in OCSF format to Elasticsearch",
)
elasticsearch_parser.add_argument(
"--elasticsearch-url",
nargs="?",
type=str,
default=None,
help="Elasticsearch server URL (e.g., https://localhost:9200). Can also use ELASTICSEARCH_URL env var.",
)
elasticsearch_parser.add_argument(
"--elasticsearch-index",
nargs="?",
type=str,
default="prowler-findings",
help="Elasticsearch index name (default: prowler-findings)",
)
elasticsearch_parser.add_argument(
"--elasticsearch-api-key",
nargs="?",
type=str,
default=None,
help="Elasticsearch API key for authentication. Can also use ELASTICSEARCH_API_KEY env var.",
)
elasticsearch_parser.add_argument(
"--elasticsearch-username",
nargs="?",
type=str,
default=None,
help="Elasticsearch username for basic auth. Can also use ELASTICSEARCH_USERNAME env var.",
)
elasticsearch_parser.add_argument(
"--elasticsearch-password",
nargs="?",
type=str,
default=None,
help="Elasticsearch password for basic auth. Can also use ELASTICSEARCH_PASSWORD env var.",
)
elasticsearch_parser.add_argument(
"--elasticsearch-skip-tls-verify",
action="store_true",
help="Skip TLS certificate verification (not recommended for production)",
)
elasticsearch_parser.add_argument(
"--send-es-only-fails",
action="store_true",
help="Send only failed findings to Elasticsearch",
)
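Putting the new flags together, a typical invocation might look like the following (cluster URL and index are placeholders, and `aws` stands in for any provider; the API key is read from the environment variable that the help text mentions):
```bash
# Hypothetical run: ship only failed findings to a self-hosted cluster
export ELASTICSEARCH_API_KEY="<api key>"    # keeps the key out of the argv
prowler aws --elasticsearch \
  --elasticsearch-url https://localhost:9200 \
  --elasticsearch-index prowler-findings \
  --elasticsearch-skip-tls-verify \
  --send-es-only-fails
```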
@@ -0,0 +1,389 @@
import base64
import json
from dataclasses import dataclass
from datetime import date, datetime
from typing import List, Optional
import requests
import urllib3
from prowler.lib.integrations.elasticsearch.exceptions.exceptions import (
ElasticsearchConnectionError,
ElasticsearchIndexError,
)
from prowler.lib.logger import logger
# Disable urllib3 insecure-request warnings (emitted when TLS verification is skipped)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Maximum number of findings to send in a single bulk request
ELASTICSEARCH_MAX_BATCH = 500
def _json_serial(obj):
"""JSON serializer for objects not serializable by default json code."""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, set):
return list(obj)
raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
@dataclass
class ElasticsearchConnection:
"""Elasticsearch connection status."""
connected: bool = False
error_message: str = ""
index_exists: bool = False
class Elasticsearch:
"""Elasticsearch integration for sending OCSF findings."""
def __init__(
self,
url: str,
index: str,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
skip_tls_verify: bool = False,
findings: Optional[List[dict]] = None,
send_only_fails: bool = False,
):
"""
Initialize the Elasticsearch integration.
Args:
url: Elasticsearch server URL (e.g., https://localhost:9200)
index: Elasticsearch index name
api_key: Elasticsearch API key for authentication
username: Elasticsearch username for basic auth
password: Elasticsearch password for basic auth
skip_tls_verify: Skip TLS certificate verification
findings: List of OCSF findings to send
send_only_fails: Only send failed findings
"""
self._url = url.rstrip("/") if url else ""
self._index = index
self._api_key = api_key
self._username = username
self._password = password
self._skip_tls_verify = skip_tls_verify
self._send_only_fails = send_only_fails
self._findings = self._filter_findings(findings or [])
self._session = self._create_session()
def _create_session(self) -> requests.Session:
"""Create HTTP session with authentication."""
session = requests.Session()
# Set authentication headers
if self._api_key:
session.headers["Authorization"] = f"ApiKey {self._api_key}"
elif self._username and self._password:
credentials = base64.b64encode(
f"{self._username}:{self._password}".encode()
).decode()
session.headers["Authorization"] = f"Basic {credentials}"
session.headers["Content-Type"] = "application/json"
# Configure TLS verification
session.verify = not self._skip_tls_verify
return session
def _filter_findings(self, findings: List[dict]) -> List[dict]:
"""Filter findings based on status if send_only_fails is True."""
if self._send_only_fails:
return [f for f in findings if f.get("status_code") == "FAIL"]
return findings
def test_connection(self) -> ElasticsearchConnection:
"""
Test connection to Elasticsearch cluster.
Returns:
ElasticsearchConnection with connection status
"""
connection = ElasticsearchConnection()
try:
response = self._session.get(
f"{self._url}/",
timeout=30,
)
if response.status_code == 200:
connection.connected = True
logger.info(f"Successfully connected to Elasticsearch at {self._url}")
elif response.status_code == 401:
connection.error_message = (
"Authentication failed. Check your credentials."
)
logger.error(
f"Elasticsearch authentication failed at {self._url}: {response.text}"
)
else:
connection.error_message = (
f"Unexpected response: {response.status_code} - {response.text}"
)
logger.error(
f"Elasticsearch connection error at {self._url}: {response.status_code}"
)
except requests.exceptions.SSLError as e:
connection.error_message = f"SSL/TLS error. Use --elasticsearch-skip-tls-verify if using self-signed certificates: {str(e)}"
logger.error(f"Elasticsearch SSL error: {e}")
except requests.exceptions.ConnectionError as e:
connection.error_message = f"Could not connect to server: {str(e)}"
logger.error(f"Elasticsearch connection error: {e}")
except requests.exceptions.Timeout as e:
connection.error_message = f"Connection timed out: {str(e)}"
logger.error(f"Elasticsearch timeout: {e}")
except Exception as e:
connection.error_message = f"Unexpected error: {str(e)}"
logger.error(f"Elasticsearch unexpected error: {e}")
return connection
def create_index_if_not_exists(self) -> bool:
"""
Create index with OCSF-compatible mapping if it doesn't exist.
Returns:
True if index exists or was created successfully
"""
try:
# Check if index exists
response = self._session.head(
f"{self._url}/{self._index}",
timeout=30,
)
if response.status_code == 200:
logger.info(f"Elasticsearch index '{self._index}' already exists")
return True
# Create index with dynamic mapping for OCSF data
# Using dynamic mapping to accommodate the full OCSF schema
index_settings = {
"settings": {
"number_of_shards": 1,
"number_of_replicas": 0,
"index.mapping.total_fields.limit": 2000,
},
"mappings": {
"dynamic": True,
"properties": {
"time": {"type": "date", "format": "epoch_second"},
"time_dt": {"type": "date"},
"severity_id": {"type": "integer"},
"severity": {"type": "keyword"},
"status_id": {"type": "integer"},
"status": {"type": "keyword"},
"status_code": {"type": "keyword"},
"activity_id": {"type": "integer"},
"activity_name": {"type": "keyword"},
"type_uid": {"type": "integer"},
"type_name": {"type": "keyword"},
"category_uid": {"type": "integer"},
"category_name": {"type": "keyword"},
"class_uid": {"type": "integer"},
"class_name": {"type": "keyword"},
"message": {"type": "text"},
"status_detail": {"type": "text"},
"risk_details": {"type": "text"},
"finding_info": {
"properties": {
"uid": {"type": "keyword"},
"title": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
},
"desc": {"type": "text"},
"created_time": {
"type": "date",
"format": "epoch_second",
},
"created_time_dt": {"type": "date"},
"types": {"type": "keyword"},
"name": {"type": "keyword"},
}
},
"cloud": {
"properties": {
"provider": {"type": "keyword"},
"region": {"type": "keyword"},
"account": {
"properties": {
"uid": {"type": "keyword"},
"name": {"type": "keyword"},
"type_id": {"type": "integer"},
"type": {"type": "keyword"},
}
},
"org": {
"properties": {
"uid": {"type": "keyword"},
"name": {"type": "keyword"},
}
},
}
},
"resources": {
"type": "nested",
"properties": {
"uid": {"type": "keyword"},
"name": {"type": "keyword"},
"type": {"type": "keyword"},
"region": {"type": "keyword"},
"cloud_partition": {"type": "keyword"},
"namespace": {"type": "keyword"},
"labels": {"type": "keyword"},
"group": {
"properties": {
"name": {"type": "keyword"},
}
},
},
},
"metadata": {
"properties": {
"event_code": {"type": "keyword"},
"version": {"type": "keyword"},
"profiles": {"type": "keyword"},
"tenant_uid": {"type": "keyword"},
"product": {
"properties": {
"uid": {"type": "keyword"},
"name": {"type": "keyword"},
"vendor_name": {"type": "keyword"},
"version": {"type": "keyword"},
}
},
}
},
"remediation": {
"properties": {
"desc": {"type": "text"},
"references": {"type": "keyword"},
}
},
},
},
}
response = self._session.put(
f"{self._url}/{self._index}",
json=index_settings,
timeout=30,
)
if response.status_code in (200, 201):
logger.info(f"Created Elasticsearch index '{self._index}'")
return True
else:
logger.error(
f"Failed to create index '{self._index}': {response.status_code} - {response.text}"
)
return False
except Exception as e:
logger.error(f"Error creating Elasticsearch index: {e}")
raise ElasticsearchIndexError(
index=self._index,
message=str(e),
original_exception=e,
)
def batch_send_to_elasticsearch(self) -> int:
"""
Send findings to Elasticsearch using bulk API.
Returns:
Number of findings successfully sent
"""
if not self._findings:
logger.info("No findings to send to Elasticsearch")
return 0
total_sent = 0
try:
total_sent = self._send_findings_in_batches(self._findings)
logger.info(f"Sent {total_sent} findings to Elasticsearch")
except Exception as e:
logger.error(f"Error sending findings to Elasticsearch: {e}")
raise
return total_sent
def _send_findings_in_batches(self, findings: List[dict]) -> int:
"""
Send findings in batches using the bulk API.
Args:
findings: List of OCSF findings to send
Returns:
Number of findings successfully sent
"""
total_sent = 0
# Process findings in batches
for i in range(0, len(findings), ELASTICSEARCH_MAX_BATCH):
batch = findings[i : i + ELASTICSEARCH_MAX_BATCH]
# Build bulk request body
bulk_body = ""
for finding in batch:
# Use finding_info.uid as the document ID if available
doc_id = finding.get("finding_info", {}).get("uid", None)
if doc_id:
action = {"index": {"_index": self._index, "_id": doc_id}}
else:
action = {"index": {"_index": self._index}}
bulk_body += json.dumps(action) + "\n"
bulk_body += json.dumps(finding, default=_json_serial) + "\n"
try:
response = self._session.post(
f"{self._url}/_bulk",
data=bulk_body,
headers={"Content-Type": "application/x-ndjson"},
timeout=60,
)
if response.status_code in (200, 201):
result = response.json()
if result.get("errors"):
# Count successful items
success_count = sum(
1
for item in result.get("items", [])
if item.get("index", {}).get("status") in (200, 201)
)
failed_count = len(batch) - success_count
logger.warning(
f"Bulk request completed with {failed_count} errors"
)
total_sent += success_count
else:
total_sent += len(batch)
else:
logger.error(
f"Bulk request failed: {response.status_code} - {response.text}"
)
except Exception as e:
logger.error(f"Error in bulk request: {e}")
raise ElasticsearchConnectionError(
url=self._url,
message=f"Bulk request failed: {str(e)}",
original_exception=e,
)
return total_sent
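For a concrete picture of what `_send_findings_in_batches` puts on the wire, the bulk body is NDJSON: one action line, then the document itself, repeated per finding. A runnable sketch with stand-in findings (minimal dicts, not real OCSF events):
```python
import json

# Stand-in findings: only the fields the batching code inspects.
findings = [
    {"finding_info": {"uid": "finding-001"}, "status_code": "FAIL"},
    {"finding_info": {}, "status_code": "PASS"},  # no uid -> ES assigns the _id
]

bulk_body = ""
for finding in findings:
    doc_id = finding.get("finding_info", {}).get("uid")
    action = {"index": {"_index": "prowler-findings", **({"_id": doc_id} if doc_id else {})}}
    bulk_body += json.dumps(action) + "\n"   # action metadata line
    bulk_body += json.dumps(finding) + "\n"  # document source line

print(bulk_body, end="")
# {"index": {"_index": "prowler-findings", "_id": "finding-001"}}
# {"finding_info": {"uid": "finding-001"}, "status_code": "FAIL"}
# {"index": {"_index": "prowler-findings"}}
# {"finding_info": {}, "status_code": "PASS"}
```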
@@ -0,0 +1,50 @@
from prowler.exceptions.exceptions import ProwlerException
class ElasticsearchBaseException(ProwlerException):
"""Base exception for Elasticsearch integration."""
def __init__(self, code: int, message: str, original_exception: Exception = None):
error_info = {
"message": message,
"remediation": "Please check your Elasticsearch configuration and try again.",
}
super().__init__(
code=code,
source="Elasticsearch",
original_exception=original_exception,
error_info=error_info,
)
class ElasticsearchConnectionError(ElasticsearchBaseException):
"""Connection to Elasticsearch failed."""
def __init__(self, url: str, message: str, original_exception: Exception = None):
super().__init__(
code=8000,
message=f"Failed to connect to Elasticsearch at {url}: {message}",
original_exception=original_exception,
)
class ElasticsearchAuthenticationError(ElasticsearchBaseException):
"""Authentication to Elasticsearch failed."""
def __init__(self, message: str, original_exception: Exception = None):
super().__init__(
code=8001,
message=f"Elasticsearch authentication failed: {message}",
original_exception=original_exception,
)
class ElasticsearchIndexError(ElasticsearchBaseException):
"""Index operation failed."""
def __init__(self, index: str, message: str, original_exception: Exception = None):
super().__init__(
code=8002,
message=f"Elasticsearch index '{index}' error: {message}",
original_exception=original_exception,
)
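A minimal caller-side sketch of how these exceptions surface, assuming the `Elasticsearch` class from the integration module above (URL, key, and findings are placeholders):
```python
from prowler.lib.integrations.elasticsearch.elasticsearch import Elasticsearch
from prowler.lib.integrations.elasticsearch.exceptions.exceptions import (
    ElasticsearchConnectionError,
    ElasticsearchIndexError,
)

es = Elasticsearch(
    url="https://localhost:9200",  # placeholder cluster
    index="prowler-findings",
    api_key="<api key>",
    findings=[],  # normally the OCSF finding dicts
)

try:
    if es.test_connection().connected:
        es.create_index_if_not_exists()   # may raise ElasticsearchIndexError
        es.batch_send_to_elasticsearch()  # may raise ElasticsearchConnectionError
except ElasticsearchIndexError as error:
    print(f"Index creation failed: {error}")
except ElasticsearchConnectionError as error:
    print(f"Bulk send failed: {error}")
```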
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_policy_assignment",
"CheckTitle": "Ensure that Activity Log Alert exists for Create Policy Assignment",
"CheckTitle": "Subscription has an Azure Monitor activity log alert for policy assignment creation",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Severity": "medium",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the Create Policy Assignment event.",
"Risk": "Monitoring for create policy assignment events gives insight into changes done in 'Azure policy - assignments' and can reduce the time it takes to detect unsolicited changes.",
"RelatedUrl": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
"Description": "**Azure Monitor Activity Log alert** configurations are assessed for an **activity log alert** on `Microsoft.Authorization/policyAssignments/write`, indicating monitoring of newly created **Azure Policy assignments**",
"Risk": "Absent alerts on new policy assignments, unauthorized or accidental changes can silently weaken governance. Adversaries could assign permissive policies or replace deny rules, enabling misconfigurations, privilege expansion, and data exposure-degrading **integrity** and threatening **confidentiality** and **availability**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/dotnet/api/azure.resourcemanager.monitor.activitylogalertresource?view=azure-dotnet",
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-alert-for-create-policy-assignment-events.html"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Authorization/policyAssignments/write and level=<verbose | information | warning | error | critical> --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription ID> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-alert-for-create-policy-assignment-events.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --name '<activity log rule name>' --resource-group '<resource group name>' --location global --scopes '/subscriptions/<subscription ID>' --condition \"category=Administrative and operationName=Microsoft.Authorization/policyAssignments/write\" --enabled true",
"NativeIaC": "```bicep\n// Azure Monitor Activity Log Alert for Policy Assignment creation\nresource activityLogAlert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'global'\n properties: {\n enabled: true\n scopes: [ '/subscriptions/<subscription ID>' ]\n condition: {\n allOf: [\n {\n field: 'category'\n equals: 'Administrative' // Critical: filter Activity Log category to Administrative\n }\n {\n field: 'operationName'\n equals: 'Microsoft.Authorization/policyAssignments/write' // Critical: alert on Policy Assignment creation\n }\n ]\n }\n }\n}\n```",
"Other": "1. In the Azure Portal, go to Monitor > Alerts > Alert rules\n2. Click + Create > Alert rule\n3. Scope: Select the target Subscription and click Apply\n4. Condition: Choose Activity log, then set Category = Administrative and Operation name = Microsoft.Authorization/policyAssignments/write; click Apply\n5. Actions: Skip or select an existing Action group (optional)\n6. Details: Enter a Name and ensure Enable alert rule upon creation is checked\n7. Click Review + create, then Create",
"Terraform": "```hcl\n# Azure Monitor Activity Log Alert for Policy Assignment creation\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n location = \"global\"\n scopes = [\"/subscriptions/<subscription ID>\"]\n\n criteria {\n category = \"Administrative\" # Critical: Activity Log category\n operation_name = \"Microsoft.Authorization/policyAssignments/write\" # Critical: Policy Assignment creation\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Policy assignment (policyAssignments). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create policy assignment (Microsoft.Authorization/policyAssignments). 12. Select the Actions tab. 13. To use an existing action group, click elect action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log"
"Text": "Implement an **activity log alert** for `Microsoft.Authorization/policyAssignments/write` and route to an action group for timely response.\n\nApply across all subscriptions, restrict assignment rights (**least privilege**), require change approval, and integrate notifications with your SIEM for **defense in depth**.",
"Url": "https://hub.prowler.com/check/monitor_alert_create_policy_assignment"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_update_nsg",
"CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Network Security Group",
"CheckTitle": "Subscription has an Activity Log alert for Network Security Group create or update operations",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
"Risk": "Monitoring for Create or Update Network Security Group events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure Monitor Activity Log alert** monitors **Network Security Group** changes via the `Microsoft.Network/networkSecurityGroups/write` operation to capture create/update events across the subscription",
"Risk": "Lack of alerting on NSG changes allows **unauthorized network policy modifications** to go unnoticed. Adversaries or mistakes could open ports, reduce segmentation, and enable **lateral movement**, impacting data **confidentiality** and service **availability** through exposure or disruption of critical traffic",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://learn.microsoft.com/en-us/azure/azure-monitor/platform/activity-log-schema",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-update-network-security-group-rule-alert-in-use.html"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/write and level=verbose --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' --subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-update-network-security-group-rule-alert-in-use.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --resource-group '<example_resource_name>' --name '<example_resource_name>' --scopes '/subscriptions/<subscription ID>' --condition \"category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/write\" --location global",
"NativeIaC": "```bicep\n// Activity Log alert for NSG create/update\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n scopes: [ subscription().id ]\n condition: {\n allOf: [\n { field: 'category', equals: 'Administrative' }\n { field: 'operationName', equals: 'Microsoft.Network/networkSecurityGroups/write' } // Critical: triggers on NSG create/update\n ]\n }\n enabled: true // Ensures the alert is active\n }\n}\n```",
"Other": "1. In the Azure portal, go to Monitor > Alerts > Alert rules > Create\n2. Scope: Select your subscription and click Apply\n3. Condition: Choose Activity log, set Category to Administrative, set Operation name to Microsoft.Network/networkSecurityGroups/write, then Done\n4. Actions: Skip (optional)\n5. Details: Name the rule and set Region to Global, ensure Enable upon creation is checked\n6. Review + create > Create",
"Terraform": "```hcl\n# Activity Log alert for NSG create/update\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<subscription_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Network/networkSecurityGroups/write\" # Critical: triggers on NSG create/update\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Network security groups. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Network Security Group (Microsoft.Network/networkSecurityGroups). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
"Text": "Implement a subscription-wide **Activity Log alert** for NSG change operations and route notifications to an **action group** for rapid triage.\n\nApply **least privilege** for change tooling, enforce **change management**, and add complementary alerts for `Microsoft.Network/networkSecurityGroups/securityRules/write` and `.../delete`. *Integrate with SIEM for correlation*",
"Url": "https://hub.prowler.com/check/monitor_alert_create_update_nsg"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,39 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_update_public_ip_address_rule",
"CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Public IP Address rule",
"CheckTitle": "Subscription has an Activity Log Alert for Public IP address create or update operations",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Severity": "medium",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
"Risk": "Monitoring for Create or Update Public IP Address events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure Monitor activity log alert** for **Public IP addresses** tracks `Microsoft.Network/publicIPAddresses/write` events at the subscription level, covering any creation or update of public IP resources.",
"Risk": "Without this alert, unauthorized or mistaken public IP changes can go unnoticed, exposing workloads to the Internet.\n- Confidentiality: unexpected ingress paths\n- Integrity: shadow endpoints for control\n- Availability: larger DDoS surface and outages",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-or-update-public-ip-alert.html",
"https://support.icompaas.com/support/solutions/articles/62000229918-ensure-that-activity-log-alert-exists-for-create-or-update-public-ip-address-rule",
"https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/monitor-public-ip"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/publicIPAddresses/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-public-ip-alert.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --resource-group <example_resource_name> --name <example_resource_name> --scopes /subscriptions/<example_resource_id> --condition \"category=Administrative and operationName=Microsoft.Network/publicIPAddresses/write\" --location global",
"NativeIaC": "```bicep\n// Activity Log Alert for Public IP create/update\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'global'\n properties: {\n enabled: true\n scopes: ['/subscriptions/<example_resource_id>']\n condition: {\n allOf: [\n { field: 'category', equals: 'Administrative' }\n { field: 'operationName', equals: 'Microsoft.Network/publicIPAddresses/write' } // Critical: alerts on Public IP create/update\n ]\n }\n }\n}\n```",
"Other": "1. In Azure Portal, go to Monitor > Alerts > Alert rules > Create\n2. Scope: Select your subscription and click Done\n3. Condition: Choose Activity log, then select the signal \"Create or Update Public Ip Address (publicIPAddresses)\"\n4. Details: Enter an alert rule name; Region: Global; Ensure Enable alert rule upon creation is checked\n5. Click Review + create, then Create",
"Terraform": "```hcl\n# Activity Log Alert for Public IP create/update\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Network/publicIPAddresses/write\" # Critical: alerts on Public IP create/update\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Public IP addresses. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Public Ip Address (Microsoft.Network/publicIPAddresses). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
"Text": "Create a subscription-wide **activity log alert** on `Microsoft.Network/publicIPAddresses/write` and route it to an **action group**.\n\nEnforce **least privilege** for IP management, apply **change control**, and use **defense in depth** (private endpoints, bastions, VPN) to minimize public exposure and speed response.",
"Url": "https://hub.prowler.com/check/monitor_alert_create_update_public_ip_address_rule"
}
},
"Categories": [],
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_update_security_solution",
"CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Security Solution",
"CheckTitle": "Subscription has Activity Log alert for Security Solution create or update",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Severity": "medium",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
"Risk": "Monitoring for Create or Update Security Solution events gives insight into changes to the active security solutions and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure Monitor activity log alert** is configured to capture **Security Solutions** create/update operations (`Microsoft.Security/securitySolutions/write`) at subscription scope.",
"Risk": "Without this alert, **unauthorized or mistaken changes** to security tooling can go undetected. Attackers could disable defenses, alter integrations, or weaken policies, eroding the **integrity** of controls, creating blind spots that threaten **confidentiality**, and delaying incident response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-or-update-security-solution-alert.html#trendmicro"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Security/securitySolutions/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-security-solution-alert.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --name \"<activity log rule name>\" --resource-group \"<example_resource_name>\" --scopes \"/subscriptions/<example_resource_id>\" --condition \"category=Administrative and operationName=Microsoft.Security/securitySolutions/write\" --location Global",
"NativeIaC": "```bicep\n// Activity Log Alert for Security Solution create/update\nresource activityLogAlert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n scopes: [ '/subscriptions/<example_resource_id>' ]\n condition: {\n allOf: [\n {\n field: 'category'\n equals: 'Administrative'\n }\n {\n field: 'operationName' // Critical: match Security Solution create/update\n equals: 'Microsoft.Security/securitySolutions/write' // Triggers on this operation\n }\n ]\n }\n enabled: true\n }\n}\n```",
"Other": "1. In the Azure portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select your Subscription and click Apply\n3. Condition: Choose Activity log, set Signal name to Administrative, then add a filter Operation name = Microsoft.Security/securitySolutions/write\n4. Actions: Skip (no action group required)\n5. Details: Enter a Name, set Region to Global, ensure Enable alert rule upon creation is checked\n6. Review + create > Create",
"Terraform": "```hcl\n# Activity Log Alert for Security Solution create/update\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Security/securitySolutions/write\" # Critical: fires on Security Solution create/update\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Security Solutions (securitySolutions). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Security Solutions (Microsoft.Security/securitySolutions). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
"Text": "Configure an **activity log alert** for `Microsoft.Security/securitySolutions/write` and route it to action groups for prompt notification/automation.\n\nApply **least privilege**, require **change control**, and forward alerts to a central SIEM to strengthen **defense in depth**.",
"Url": "https://hub.prowler.com/check/monitor_alert_create_update_security_solution"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_create_update_sqlserver_fr",
"CheckTitle": "Ensure that Activity Log Alert exists for Create or Update SQL Server Firewall Rule",
"CheckTitle": "Subscription has an Activity Log alert for SQL Server firewall rule create or update events",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Severity": "medium",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
"Risk": "Monitoring for Create or Update SQL Server Firewall Rule events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure Monitor activity log alerts** are configured for **Azure SQL Server firewall rule changes**, targeting the `Microsoft.Sql/servers/firewallRules/write` operation.\n\nThis evaluates whether notifications or automated actions are set when firewall rules are created or updated.",
"Risk": "Without alerting on firewall rule changes, unauthorized or accidental openings can remain unnoticed, exposing databases to untrusted networks.\n\nThis harms **confidentiality** (data exfiltration via widened IP ranges) and **integrity** (unauthorized queries), while increasing attacker dwell time.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --name <activity_log_rule_name> --resource-group <resource_group_name> --scopes /subscriptions/<subscription_id> --condition \"category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/write\" --location global",
"NativeIaC": "```bicep\n// Activity Log alert for SQL Server firewall rule create/update\nresource example_activity_log_alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n enabled: true\n scopes: [ '/subscriptions/<example_resource_id>' ]\n condition: {\n allOf: [\n {\n field: 'category'\n equals: 'Administrative'\n }\n {\n field: 'operationName'\n equals: 'Microsoft.Sql/servers/firewallRules/write' // Critical: alert on SQL Server firewall rule create/update\n }\n ]\n }\n }\n}\n```",
"Other": "1. In the Azure portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select the subscription and click Done\n3. Condition: Choose Signal type \"Activity log\", then set\n - Category: Administrative\n - Operation name: Microsoft.Sql/servers/firewallRules/write\n Click Done\n4. Actions: Skip (no action group required)\n5. Details: Enter an Alert rule name and ensure Enable alert rule upon creation is checked\n6. Review + create > Create",
"Terraform": "```hcl\n# Activity Log alert for SQL Server firewall rule create/update\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Sql/servers/firewallRules/write\" # Critical: alert on SQL Server firewall rule create/update\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Server Firewall Rule (servers/firewallRules). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create/Update server firewall rule (Microsoft.Sql/servers/firewallRules). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
"Text": "Enable an activity log alert for `Microsoft.Sql/servers/firewallRules/write` and route it to responsive action groups.\n\nApply **least privilege** for firewall management, enforce change approvals, and use **defense in depth**: prefer **private endpoints** and avoid broad public network access.",
"Url": "https://hub.prowler.com/check/monitor_alert_create_update_sqlserver_fr"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,38 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_nsg",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Network Security Group",
"CheckTitle": "Subscription has an Activity Log alert for Network Security Group delete operations",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the Delete Network Security Group event.",
"Risk": "Monitoring for 'Delete Network Security Group' events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure Monitor activity log alerts** include the NSG deletion signal (`Microsoft.Network/networkSecurityGroups/delete` or `Microsoft.ClassicNetwork/networkSecurityGroups/delete`). The finding indicates whether a subscription has an alert rule configured to trigger when a Network Security Group is deleted.",
"Risk": "Without alerting on **NSG deletions**, network segmentation can be removed unnoticed, exposing services to broad ingress/egress. Malicious actors or automation may delete NSGs to enable **lateral movement** and **data exfiltration**. Missing alerts delay response, impacting confidentiality and availability.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/delete-network-security-group-rule-alert-in-use.html#trendmicro"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-network-security-group-rule-alert-in-use.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --name \"<activity log rule name>\" --resource-group \"<resource group name>\" --scopes \"/subscriptions/<subscription ID>\" --condition \"category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/delete\" --location global",
"NativeIaC": "```bicep\n// Activity Log alert for NSG delete\nresource activityAlert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n scopes: ['/subscriptions/<example_resource_id>']\n enabled: true\n condition: {\n allOf: [\n { field: 'category', equals: 'Administrative' } // Critical: filter Activity Log to Administrative category\n { field: 'operationName', equals: 'Microsoft.Network/networkSecurityGroups/delete' } // Critical: triggers on NSG delete\n ]\n }\n }\n}\n```",
"Other": "1. In Azure Portal, go to Monitor > Alerts > Alert rules\n2. Click + Create > Alert rule\n3. Scope: Select the target subscription and click Apply\n4. Condition: Choose Activity log, select the signal \"Delete Network Security Group\" (operation Microsoft.Network/networkSecurityGroups/delete); ensure Category is Administrative\n5. Details: Enter a name; leave other settings as default\n6. Click Review + create, then Create",
"Terraform": "```hcl\n# Activity Log alert for NSG delete\nresource \"azurerm_monitor_activity_log_alert\" \"example\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n category = \"Administrative\" # Critical: Activity Log category filter\n operation_name = \"Microsoft.Network/networkSecurityGroups/delete\" # Critical: alert on NSG delete\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Network security groups. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Network Security Group (Microsoft.Network/networkSecurityGroups). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
"Text": "Configure a subscription-wide **activity log alert** for the NSG delete operation (`Microsoft.Network/networkSecurityGroups/delete`; include Classic if applicable) and route notifications via **action groups**. Enforce **least privilege** for NSG changes, require **change control**, and integrate with your **SIEM** for correlation.",
"Url": "https://hub.prowler.com/check/monitor_alert_delete_nsg"
}
},
"Categories": [],
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_policy_assignment",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Policy Assignment",
"CheckTitle": "Subscription has an Activity Log alert for policy assignment deletion",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
"Risk": "Monitoring for delete policy assignment events gives insight into changes done in 'azure policy - assignments' and can reduce the time it takes to detect unsolicited changes.",
"RelatedUrl": "https://docs.microsoft.com/en-in/rest/api/monitor/activitylogalerts/createorupdate",
"Description": "**Azure Monitor Activity log alerts** for policy assignment deletions using the `Microsoft.Authorization/policyAssignments/delete` operation at subscription scope",
"Risk": "Without this alert, **policy assignment deletions** can go unnoticed, eroding configuration **integrity** and enabling governance drift. Malicious or accidental changes may remove guardrails, increasing exposure and threatening **confidentiality** of protected resources.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://learn.microsoft.com/en-in/rest/api/monitor/activity-log-alerts/create-or-update?view=rest-monitor-2020-10-01&tabs=HTTP",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/delete-policy-assignment-alert-in-use.html#trendmicro"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Authorization/policyAssignments/delete and level=<verbose | information | warning | error | critical> --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-policy-assignment-alert-in-use.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --resource-group <example_resource_name> --name <example_resource_name> --scopes \"/subscriptions/<example_resource_id>\" --condition \"operationName=Microsoft.Authorization/policyAssignments/delete\" --location global",
"NativeIaC": "```bicep\n// Activity Log alert for Policy Assignment deletion\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n scopes: [\n '/subscriptions/<example_resource_id>'\n ]\n condition: {\n allOf: [\n {\n field: 'operationName'\n equals: 'Microsoft.Authorization/policyAssignments/delete' // CRITICAL: alerts on policy assignment deletion\n }\n ]\n }\n actions: {\n actionGroups: [] // Required property; empty keeps rule minimal\n }\n }\n}\n```",
"Other": "1. In Azure Portal, go to Monitor > Alerts > Alert rules\n2. Click + Create > Alert rule\n3. Scope: Select your subscription and click Apply\n4. Condition: Choose Activity log, then set Operation name equals \"Microsoft.Authorization/policyAssignments/delete\"\n5. Actions: Skip (optional)\n6. Details: Enter a name and set Enable alert rule upon creation\n7. Click Create",
"Terraform": "```hcl\n# Activity Log alert for Policy Assignment deletion\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n operation_name = \"Microsoft.Authorization/policyAssignments/delete\" # CRITICAL: alerts on policy assignment deletion\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Policy assignment (policyAssignments). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete policy assignment (Microsoft.Authorization/policyAssignments). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log"
"Text": "- Configure an activity log alert for `Microsoft.Authorization/policyAssignments/delete` and route to an action group.\n- Enforce **least privilege** and **separation of duties** for policy changes and require approvals.\n- Integrate alerts with your SIEM and define playbooks for rapid response.",
"Url": "https://hub.prowler.com/check/monitor_alert_delete_policy_assignment"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_public_ip_address_rule",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Public IP Address rule",
"CheckTitle": "Azure subscription has an Activity Log alert for public IP address deletion",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Severity": "medium",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
"Risk": "Monitoring for Delete Public IP Address events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure Monitor activity log alert** exists for the **Delete Public IP Address** operation (`Microsoft.Network/publicIPAddresses/delete`), capturing subscription-wide events when Public IP resources are removed.",
"Risk": "Unmonitored deletion of Public IPs can abruptly sever ingress/egress, break DNS and allowlists, and take services offline (**availability**). Attackers or misconfigurations can delete IPs to cause **DoS** or evade controls, and delayed visibility hinders **incident response** and **forensics**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/delete-public-ip-alert.html#trendmicro",
"https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/publicIPAddresses/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-public-ip-alert.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --name <activity_log_rule_name> --resource-group <resource_group_name> --location global --scopes /subscriptions/<subscription_id> --condition category=Administrative and operationName=Microsoft.Network/publicIPAddresses/delete",
"NativeIaC": "```bicep\n// Activity Log alert for Public IP deletion\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n enabled: true\n scopes: [\n '/subscriptions/<example_resource_id>' // Scope the alert to the subscription\n ]\n condition: {\n allOf: [\n { field: 'category', equals: 'Administrative' }\n { field: 'operationName', equals: 'Microsoft.Network/publicIPAddresses/delete' } // Critical: triggers when a Public IP is deleted\n ]\n }\n }\n}\n```",
"Other": "1. In the Azure portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select your subscription and click Apply\n3. Condition: Choose Activity log, then set Category = Administrative and Operation name = Microsoft.Network/publicIPAddresses/delete; click Apply\n4. Actions: Skip (no action group required to pass)\n5. Details: Enter an alert name, set Region to Global, ensure Enable alert rule upon creation is checked\n6. Review + create > Create",
"Terraform": "```hcl\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Network/publicIPAddresses/delete\" # Critical: alert when a Public IP is deleted\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Public IP addresses. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Public Ip Address (Microsoft.Network/publicIPAddresses). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
"Text": "Implement an activity log alert for `Microsoft.Network/publicIPAddresses/delete` and route it to an action group for rapid response.\n- Apply **least privilege** and change approval for IP deletions\n- Use **resource locks** on critical IPs\n- Centralize alerts in your SIEM and define runbooks for containment",
"Url": "https://hub.prowler.com/check/monitor_alert_delete_public_ip_address_rule"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,38 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_security_solution",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Security Solution",
"CheckTitle": "Subscription has an Azure Monitor Activity Log alert for Microsoft.Security/securitySolutions delete operations",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Severity": "medium",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the Delete Security Solution event.",
"Risk": "Monitoring for Delete Security Solution events gives insight into changes to the active security solutions and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure activity log alerts** monitor deletions of **Security Solutions** by targeting the operation `Microsoft.Security/securitySolutions/delete` at subscription scope.\n\nIdentifies whether notifications are configured for security solution removal events.",
"Risk": "Without this alert, **unauthorized or accidental deletions** of security tooling may go **unnoticed**, reducing the **availability** of protections and the **integrity** of monitoring. Adversaries can evade defenses, prolong dwell time, and enable **data exfiltration** under reduced visibility.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/delete-security-solution-alert.html",
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://learn.microsoft.com/en-us/cli/azure/monitor/activity-log/alert?view=azure-cli-latest",
"https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Security/securitySolutions/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-security-solution-alert.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create -g <example_resource_name> -n <example_resource_name> --condition operationName=Microsoft.Security/securitySolutions/delete --scope /subscriptions/<example_resource_id>",
"NativeIaC": "```bicep\n// Activity Log Alert for Security Solution delete\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'global'\n properties: {\n enabled: true\n scopes: [ subscription().id ]\n condition: {\n allOf: [\n {\n field: 'operationName'\n equals: 'Microsoft.Security/securitySolutions/delete' // Critical: alerts on Security Solution delete\n }\n ]\n }\n }\n}\n```",
"Other": "1. In Azure portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select your subscription and click Apply\n3. Condition: Click Add condition, search and select \"Delete Security Solutions (Microsoft.Security/securitySolutions)\", then Add\n4. Ensure no filters for Level or Status are set\n5. Details: Enter an Alert rule name and choose a resource group\n6. Create: Review + create, then Create",
"Terraform": "```hcl\n# Activity Log Alert for Security Solution delete\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n operation_name = \"Microsoft.Security/securitySolutions/delete\" # Critical: alerts on delete operation\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Security Solutions (securitySolutions). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Security Solutions (Microsoft.Security/securitySolutions). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.curitySolutions). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Security Solutions (Microsoft.Security/securitySolutions). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
"Text": "Configure a **dedicated activity log alert** for `Microsoft.Security/securitySolutions/delete` and route it to resilient **action groups** (email, chat, ticketing, SIEM). Apply **least privilege** and **resource locks** to deter tampering. Test alerting routinely and integrate it into **defense-in-depth** monitoring.",
"Url": "https://hub.prowler.com/check/monitor_alert_delete_security_solution"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_delete_sqlserver_fr",
"CheckTitle": "Ensure that Activity Log Alert exists for Delete SQL Server Firewall Rule",
"CheckTitle": "Subscription has an Activity Log Alert for SQL Server firewall rule deletions",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Severity": "medium",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Create an activity log alert for the 'Delete SQL Server Firewall Rule.'",
"Risk": "Monitoring for Delete SQL Server Firewall Rule events gives insight into SQL network access changes and may reduce the time it takes to detect suspicious activity.",
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure Monitor Activity log alerts** watch the admin operation `Microsoft.Sql/servers/firewallRules/delete`, indicating when an **Azure SQL firewall rule** is removed across a subscription.",
"Risk": "Without alerting on firewall rule deletions, unexpected changes to SQL network allowlists can go unnoticed, causing **availability** loss for apps and masking **unauthorized tampering**. A compromised admin could remove rules to disrupt service, erode control **integrity**, and delay response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro",
"Terraform": ""
"CLI": "az monitor activity-log alert create --resource-group <resource_group_name> --name <alert_name> --scopes /subscriptions/<subscription_id> --condition \"category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/delete\" --location global",
"NativeIaC": "```bicep\n// Activity Log Alert for SQL Server firewall rule deletions\nresource activityLogAlert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n scopes: [\n '/subscriptions/<example_subscription_id>'\n ]\n enabled: true\n condition: {\n allOf: [\n {\n field: 'category' // Critical: filter Activity Log category\n equals: 'Administrative' // Ensures Administrative events are matched\n }\n {\n field: 'operationName' // Critical: target deletion of SQL Server firewall rules\n equals: 'Microsoft.Sql/servers/firewallRules/delete' // This makes the check PASS\n }\n ]\n }\n }\n}\n```",
"Other": "1. In the Azure Portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select the target Subscription and click Done\n3. Condition: Click Add condition, choose Signal type = Activity log, search for and select the operation with type \"Microsoft.Sql/servers/firewallRules/delete\" (display name like \"Delete Server Firewall Rule\"), then Click Apply\n4. Actions: Skip (optional)\n5. Details: Enter an Alert rule name and ensure Enable upon creation is selected\n6. Click Create",
"Terraform": "```hcl\n# Activity Log Alert for SQL Server firewall rule deletions\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_subscription_id>\"]\n\n criteria {\n category = \"Administrative\" # Critical: filter Activity Log category\n operation_name = \"Microsoft.Sql/servers/firewallRules/delete\" # Critical: match deletion of SQL Server firewall rules\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Server Firewall Rule (servers/firewallRules). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete server firewall rule (Microsoft.Sql/servers/firewallRules). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
"Text": "Create an **activity log alert** for `Microsoft.Sql/servers/firewallRules/delete` and route it via an **action group** for rapid triage.\n\nEnforce **least privilege** and **separation of duties** on SQL admins, add alerts for related create/update operations, integrate with **SIEM**, and require *change approval* to strengthen defense in depth.",
"Url": "https://hub.prowler.com/check/monitor_alert_delete_sqlserver_fr"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, no monitoring alerts are created."
@@ -1,30 +1,39 @@
{
"Provider": "azure",
"CheckID": "monitor_alert_service_health_exists",
"CheckTitle": "Ensure that an Activity Log Alert exists for Service Health",
"CheckTitle": "Azure subscription has an enabled Activity Log alert for Service Health incidents",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"Severity": "medium",
"ResourceType": "microsoft.insights/activitylogalerts",
"ResourceGroup": "monitoring",
"Description": "Ensure that an Azure activity log alert is configured to trigger when Service Health events occur within your Microsoft Azure cloud account. The alert should activate when new events match the specified conditions in the alert rule configuration.",
"Risk": "Lack of monitoring for Service Health events may result in missing critical service issues, planned maintenance, security advisories, or other changes that could impact Azure services and regions in use.",
"RelatedUrl": "https://learn.microsoft.com/en-us/azure/service-health/overview",
"Description": "**Azure Monitor Activity Log alert** is configured for **Service Health** notifications where `category` is `ServiceHealth` and `properties.incidentType` is `Incident`, with the rule enabled.",
"Risk": "Without alerts for **Service Health incidents**, teams may miss Azure outages or degradations, harming **availability** and delaying failover. Unseen incidents can cause cascading errors, timeouts, deployment failures, and SLA breaches across dependent workloads.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/azure/service-health/service-health-notifications-properties",
"https://learn.microsoft.com/en-us/azure/service-health/alerts-activity-log-service-notifications-portal",
"https://learn.microsoft.com/en-us/azure/service-health/overview",
"https://learn.microsoft.com/en-us/azure/azure-monitor/platform/activity-log-schema",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/service-health-alert.html"
],
"Remediation": {
"Code": {
"CLI": "az monitor activity-log alert create --subscription <subscription-id> --resource-group <resource-group> --name <alert-rule> --condition category=ServiceHealth and properties.incidentType=Incident --scope /subscriptions/<subscription-id> --action-group <action-group>",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/service-health-alert.html",
"Terraform": ""
"CLI": "az monitor activity-log alert create --resource-group <resource-group> --name <alert-rule> --scopes /subscriptions/<subscription-id> --condition \"category=ServiceHealth and properties.incidentType=Incident\"",
"NativeIaC": "```bicep\n// Activity Log Alert for Service Health Incidents\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n enabled: true\n scopes: [ subscription().id ]\n condition: {\n allOf: [\n { field: 'category', equals: 'ServiceHealth' } // Critical: match Service Health category\n { field: 'properties.incidentType', equals: 'Incident' } // Critical: alert only on Incident events\n ]\n }\n }\n}\n```",
"Other": "1. In the Azure portal, go to Service Health > Health alerts > Create service health alert\n2. Scope: select your Subscription and choose the Resource group to save the alert\n3. Event types: select only Service issues (Incidents)\n4. Leave other filters as default, ensure Enable rule is On, then click Create",
"Terraform": "```hcl\n# Activity Log Alert for Service Health Incidents\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_subscription_id>\"]\n\n criteria {\n category = \"ServiceHealth\" # Critical: Service Health category\n service_health {\n events = [\"Incident\"] # Critical: alert only on Incident type\n }\n }\n}\n```"
},
"Recommendation": {
"Text": "Create an activity log alert for Service Health events and configure an action group to notify appropriate personnel.",
"Url": "https://learn.microsoft.com/en-us/azure/service-health/alerts-activity-log-service-notifications-portal"
"Text": "Create and maintain an enabled **Activity Log alert** for **Service Health Incident** events.\n- Route via **Action Groups** to on-call channels\n- Filter to critical services/regions\n- Test routing and refine recipients regularly\n- Integrate with **incident response** and **defense-in-depth** monitoring",
"Url": "https://hub.prowler.com/check/monitor_alert_service_health_exists"
}
},
"Categories": [],
"Categories": [
"resilience"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, in your Azure subscription there will not be any activity log alerts configured for Service Health events."
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_diagnostic_setting_with_appropriate_categories",
"CheckTitle": "Ensure Diagnostic Setting captures appropriate categories",
"CheckTitle": "Subscription has a diagnostic setting capturing Administrative, Security, Alert, and Policy categories",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "Configuring Diagnostic Settings",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Monitor",
"Severity": "high",
"ResourceType": "microsoft.resources/subscriptions",
"ResourceGroup": "monitoring",
"Description": "Prerequisite: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: 'Ensure that a 'Diagnostic Setting' exists.' The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
"Risk": "A diagnostic setting controls how the diagnostic log is exported. Capturing the diagnostic setting categories for appropriate control/management plane activities allows proper alerting.",
"RelatedUrl": "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings",
"Description": "**Azure Monitor Diagnostic Settings** capture **control-plane events** at the subscription level. This evaluates whether at least one setting collects the categories: `Administrative`, `Security`, `Policy`, and `Alert`.",
"Risk": "Without these categories, critical control-plane actions may go unrecorded. Attackers could change policies, roles, or alerts unnoticed, enabling privilege escalation and resource tampering. This erodes **integrity**, threatens **confidentiality**, and weakens **availability** and **incident response**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/diagnostic-setting-categories.html",
"https://learn.microsoft.com/en-us/azure/storage/common/manage-storage-analytics-logs?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json&tabs=azure-portal"
],
"Remediation": {
"Code": {
"CLI": "az monitor diagnostic-settings subscription create --subscription <subscription id> --name <diagnostic settings name> --location <location> <[- -event-hub <event hub ID> --event-hub-auth-rule <event hub auth rule ID>] [-- storage-account <storage account ID>] [--workspace <log analytics workspace ID>] --logs '[{category:Security,enabled:true},{category:Administrative,enabled:true},{ca tegory:Alert,enabled:true},{category:Policy,enabled:true}]'>",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/diagnostic-setting-categories.html",
"Terraform": ""
"CLI": "az monitor diagnostic-settings subscription create --name <example_resource_name> --workspace <example_resource_id> --logs '[{\"category\":\"Administrative\",\"enabled\":true},{\"category\":\"Security\",\"enabled\":true},{\"category\":\"Alert\",\"enabled\":true},{\"category\":\"Policy\",\"enabled\":true}]'",
"NativeIaC": "```bicep\n// Create a subscription-level diagnostic setting capturing required categories\ntargetScope = 'subscription'\n\nresource diag 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = {\n name: '<example_resource_name>'\n properties: {\n workspaceId: '<example_resource_id>' // Critical: send Activity Log to this Log Analytics workspace\n logs: [\n { category: 'Administrative', enabled: true } // Critical: required category\n { category: 'Security', enabled: true } // Critical: required category\n { category: 'Alert', enabled: true } // Critical: required category\n { category: 'Policy', enabled: true } // Critical: required category\n ]\n }\n}\n```",
"Other": "1. In Azure portal, go to Monitor > Activity log\n2. Click Diagnostic settings > Add diagnostic setting\n3. Name the setting\n4. Under Categories, check: Administrative, Security, Alert, Policy\n5. Under Destination, select Send to Log Analytics workspace and choose your workspace\n6. Click Save",
"Terraform": "```hcl\n# Subscription Activity Log diagnostic setting capturing required categories\nresource \"azurerm_monitor_diagnostic_setting\" \"example\" {\n name = \"<example_resource_name>\"\n target_resource_id = \"/subscriptions/<example_resource_id>\" # Critical: scope set to the subscription\n log_analytics_workspace_id = \"<example_resource_id>\" # Critical: destination workspace\n\n enabled_log { category = \"Administrative\" } # Critical: required category\n enabled_log { category = \"Security\" } # Critical: required category\n enabled_log { category = \"Alert\" } # Critical: required category\n enabled_log { category = \"Policy\" } # Critical: required category\n}\n```"
},
"Recommendation": {
"Text": "1. Go to Azure Monitor 2. Click Activity log 3. Click on Export Activity Logs 4. Select the Subscription from the drop down menu 5. Click on Add diagnostic setting 6. Enter a name for your new Diagnostic Setting 7. Check the following categories: Administrative, Alert, Policy, and Security 8. Choose the destination details according to your organization's needs.",
"Url": "https://learn.microsoft.com/en-us/azure/storage/common/manage-storage-analytics-logs?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json&tabs=azure-portal"
"Text": "Collect `Administrative`, `Security`, `Policy`, and `Alert` via a subscription diagnostic setting and route them to a centralized, tamper-resistant destination. Enforce **least privilege** on log access, set retention, and create **alerts** for high-risk changes as part of **defense in depth**.",
"Url": "https://hub.prowler.com/check/monitor_diagnostic_setting_with_appropriate_categories"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "When the diagnostic setting is created using Azure Portal, by default no categories are selected."
@@ -1,30 +1,38 @@
{
"Provider": "azure",
"CheckID": "monitor_diagnostic_settings_exists",
"CheckTitle": "Ensure that a 'Diagnostic Setting' exists for Subscription Activity Logs ",
"CheckTitle": "Subscription has an Activity Log diagnostic setting",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Monitor",
"Severity": "high",
"ResourceType": "microsoft.resources/subscriptions",
"ResourceGroup": "monitoring",
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnostic settings are available for each individual resource within a subscription. Settings should be configured for all appropriate resources for your environment.",
"Risk": "A diagnostic setting controls how a diagnostic log is exported. By default, logs are retained only for 90 days. Diagnostic settings should be defined so that logs can be exported and stored for a longer duration in order to analyze security activities within an Azure subscription.",
"RelatedUrl": "https://learn.microsoft.com/en-us/cli/azure/monitor/diagnostic-settings?view=azure-cli-latest",
"Description": "**Azure Monitor Diagnostic Settings** are configured to export the **Activity Log** to an external destination (Log Analytics, Storage, Event Hub, or partner).",
"Risk": "Without exporting the **Activity Log**, control-plane events lack **centralization and retention**.\n\nUndetected RBAC changes, policy updates, and resource deletions reduce **detectability**, hinder **forensics**, and weaken incident response and audit evidence.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings?WT.mc_id=AZ-MVP-5003450&tabs=portal",
"https://learn.microsoft.com/en-us/azure/azure-monitor/fundamentals/data-sources#export-the-activity-log-with-a-log-profile",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/subscription-activity-log-diagnostic-settings.html",
"https://learn.microsoft.com/en-us/cli/azure/monitor/diagnostic-settings?view=azure-cli-latest"
],
"Remediation": {
"Code": {
"CLI": "az monitor diagnostic-settings subscription create --subscription <subscription id> --name <diagnostic settings name> --location <location> <[- -event-hub <event hub ID> --event-hub-auth-rule <event hub auth rule ID>] [-- storage-account <storage account ID>] [--workspace <log analytics workspace ID>] --logs '<JSON encoded categories>' (e.g. [{category:Security,enabled:true},{category:Administrative,enabled:true},{cat egory:Alert,enabled:true},{category:Policy,enabled:true}])",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/Monitor/subscription-activity-log-diagnostic-settings.html#trendmicro",
"Terraform": ""
"CLI": "az monitor diagnostic-settings subscription create --subscription <subscription id> --name <example_resource_name> --workspace <log analytics workspace ID> --logs '[{\"category\":\"Administrative\",\"enabled\":true}]'",
"NativeIaC": "```bicep\n// Subscription-level Activity Log diagnostic setting\nresource diag 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = {\n name: '<example_resource_name>'\n scope: subscription() // CRITICAL: targets the subscription Activity Log\n properties: {\n workspaceId: '<example_resource_id>' // CRITICAL: sends logs to this Log Analytics workspace\n logs: [\n { category: 'Administrative', enabled: true } // CRITICAL: enables at least one Activity Log category\n ]\n }\n}\n```",
"Other": "1. In the Azure portal, go to Subscriptions and select your subscription\n2. Open Monitoring > Activity log, then click Diagnostic settings\n3. Click + Add diagnostic setting and enter a name\n4. Under Destination details, select Send to Log Analytics workspace and choose your workspace\n5. Under Categories, select Administrative\n6. Click Save",
"Terraform": "```hcl\n# Subscription-level Activity Log diagnostic setting\nresource \"azurerm_monitor_diagnostic_setting\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n target_resource_id = \"/subscriptions/<subscription id>\" # CRITICAL: subscription scope\n log_analytics_workspace_id = \"<example_resource_id>\" # CRITICAL: destination workspace\n\n log {\n category = \"Administrative\" # CRITICAL: enable at least one Activity Log category\n enabled = true\n }\n}\n```"
},
"Recommendation": {
"Text": "To enable Diagnostic Settings on a Subscription: 1. Go to Monitor 2. Click on Activity Log 3. Click on Export Activity Logs 4. Click + Add diagnostic setting 5. Enter a Diagnostic setting name 6. Select Categories for the diagnostic settings 7. Select the appropriate Destination details (this may be Log Analytics, Storage Account, Event Hub, or Partner solution) 8. Click Save To enable Diagnostic Settings on a specific resource: 1. Go to Monitor 2. Click Diagnostic settings 3. Click on the resource that has a diagnostics status of disabled 4. Select Add Diagnostic Setting 5. Enter a Diagnostic setting name 6. Select the appropriate log, metric, and destination. (this may be Log Analytics, Storage Account, Event Hub, or Partner solution) 7. Click save Repeat these step for all resources as needed.",
"Url": "https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-overview-activity-logs#export-the-activity-log-with-a-log-profile"
"Text": "Enable **subscription Diagnostic Settings** to send the **Activity Log** to a trusted destination.\n\nUse **immutable storage** or a **SIEM**, enforce coverage with **Azure Policy**, apply **least privilege** to log access, include essential categories, and set retention aligned to regulatory needs.",
"Url": "https://hub.prowler.com/check/monitor_diagnostic_settings_exists"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "By default, diagnostic setting is not set."
@@ -1,30 +1,37 @@
{
"Provider": "azure",
"CheckID": "monitor_storage_account_with_activity_logs_cmk_encrypted",
"CheckTitle": "Ensure the storage account containing the container with activity logs is encrypted with Customer Managed Key",
"CheckTitle": "Storage account storing Activity Log data is encrypted with a customer-managed key",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Monitor",
"ResourceType": "microsoft.storage/storageaccounts",
"ResourceGroup": "monitoring",
"Description": "Storage accounts with the activity log exports can be configured to use CustomerManaged Keys (CMK).",
"Risk": "Configuring the storage account with the activity log export container to use CMKs provides additional confidentiality controls on log data, as a given user must have read permission on the corresponding storage account and must be granted decrypt permission by the CMK.",
"RelatedUrl": "https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-data-protection#dp-5-encrypt-sensitive-data-at-rest",
"Description": "**Azure Monitor Activity Logs** sent to a **Storage account** are evaluated to confirm encryption with **Customer-Managed Keys** (`CMK`) instead of **Microsoft-managed keys**.",
"Risk": "Storing activity logs without **CMK** weakens confidentiality and control of audit data. You lose independent key ownership, limiting rapid rotation/revocation and separation of duties. If storage credentials are compromised, attackers can exfiltrate logs that map resources and changes, aiding targeted attacks and hindering effective incident response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log?tabs=cli#managing-legacy-log-profiles",
"https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-data-protection#dp-5-encrypt-sensitive-data-at-rest",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/use-cmk-for-activity-log-storage-container-encryption.html"
],
"Remediation": {
"Code": {
"CLI": "az storage account update --name <name of the storage account> --resource-group <resource group for a storage account> --encryption-key-source=Microsoft.Keyvault --encryption-key-vault <Key Vault URI> --encryption-key-name <KeyName> --encryption-key-version <Key Version>",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/Monitor/use-cmk-for-activity-log-storage-container-encryption.html",
"Terraform": "https://docs.prowler.com/checks/azure/azure-general-policies/ensure-that-storage-accounts-use-customer-managed-key-for-encryption#terraform"
"CLI": "az storage account update --name <example_resource_name> --resource-group <example_resource_name> --assign-identity --encryption-key-source Microsoft.Keyvault --encryption-key-vault <KeyVaultURI> --encryption-key-name <KeyName>",
"NativeIaC": "```bicep\n// Storage account encrypted with a customer-managed key (CMK)\nresource stg 'Microsoft.Storage/storageAccounts@2023-01-01' = {\n name: '<example_resource_name>'\n location: resourceGroup().location\n kind: 'StorageV2'\n sku: { name: 'Standard_LRS' }\n identity: { type: 'SystemAssigned' } // Required for Storage to access the Key Vault key\n properties: {\n encryption: {\n keySource: 'Microsoft.Keyvault' // CRITICAL: switches encryption from Microsoft.Storage to CMK\n keyVaultProperties: {\n keyName: '<KeyName>'\n keyVaultUri: '<KeyVaultURI>' // Uses latest key version if not specified\n }\n }\n }\n}\n```",
"Other": "1. In the Azure portal, go to Storage accounts and open the account used by your Activity Log diagnostic setting\n2. Select Identity > System assigned > set Status to On > Save\n3. Go to Settings > Encryption\n4. Select Customer-managed keys, choose your Key vault and Key, then click Save\n5. Ensure the storage account's identity has Get, Wrap Key, and Unwrap Key permissions on the key in Key Vault",
"Terraform": "```hcl\n# Storage account encrypted with a customer-managed key (CMK)\nresource \"azurerm_storage_account\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n location = \"<example_location>\"\n account_tier = \"Standard\"\n account_replication_type = \"LRS\"\n\n identity {\n type = \"SystemAssigned\" # Required for Storage to access the Key Vault key\n }\n\n customer_managed_key {\n key_vault_key_id = \"<key_vault_key_id>\" # CRITICAL: enables CMK by pointing to the Key Vault key\n }\n}\n```"
},
"Recommendation": {
"Text": "1. Go to Activity log 2. Select Export 3. Select Subscription 4. In section Storage Account, note the name of the Storage account 5. Close the Export Audit Logs blade. Close the Monitor - Activity Log blade. 6. In right column, Click service Storage Accounts to access Storage account blade 7. Click on the storage account name noted in step 4. This will open blade specific to that storage account 8. Under Security + networking, click Encryption. 9. Ensure Customer-managed keys is selected and Key URI is set.",
"Url": "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log?tabs=cli#managing-legacy-log-profiles"
"Text": "Encrypt the storage account that holds exported **Activity Logs** with **Customer-Managed Keys** via Azure Key Vault or Managed HSM. Apply **least privilege** to key usage, enforce regular rotation and revocation, and enable soft delete and purge protection. Complement with network isolation and immutable retention for **defense in depth**.",
"Url": "https://hub.prowler.com/check/monitor_storage_account_with_activity_logs_cmk_encrypted"
}
},
"Categories": [],
"Categories": [
"encryption"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "NOTE: You must have your key vault setup to utilize this. All Audit Logs will be encrypted with a key you provide. You will need to set up customer managed keys separately, and you will select which key to use via the instructions here. You will be responsible for the lifecycle of the keys, and will need to manually replace them at your own determined intervals to keep the data secure."
@@ -1,30 +1,38 @@
{
"Provider": "azure",
"CheckID": "monitor_storage_account_with_activity_logs_is_private",
"CheckTitle": "Ensure the Storage Container Storing the Activity Logs is not Publicly Accessible",
"CheckTitle": "Storage account storing activity logs does not allow public blob access",
"CheckType": [],
"ServiceName": "monitor",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "Monitor",
"ResourceType": "microsoft.storage/storageaccounts",
"ResourceGroup": "monitoring",
"Description": "The storage account container containing the activity log export should not be publicly accessible.",
"Risk": "Allowing public access to activity log content may aid an adversary in identifying weaknesses in the affected account's use or configuration.",
"RelatedUrl": "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings",
"Description": "**Azure Monitor Activity Logs** sent to a **Storage account** are evaluated for **Blob public access**. The finding identifies whether the account that stores the logs has `AllowBlobPublicAccess` turned on.",
"Risk": "Exposed log data undermines **confidentiality** by revealing operations, resource IDs, IPs, and identities.\n\nAdversaries gain **reconnaissance** to map controls, craft targeted attacks, and time actions to avoid detection, enabling **lateral movement** and broader compromise.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings",
"https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-network-security#ns-2-secure-cloud-services-with-network-controls",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/check-for-publicly-accessible-activity-log-storage-container.html"
],
"Remediation": {
"Code": {
"CLI": "az storage container set-permission --name insights-activity-logs --account-name <Storage Account Name> --public-access off",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/Monitor/check-for-publicly-accessible-activity-log-storage-container.html",
"Terraform": "https://docs.prowler.com/checks/azure/azure-logging-policies/ensure-the-storage-container-storing-the-activity-logs-is-not-publicly-accessible#terraform"
"CLI": "az storage account update --name <STORAGE_ACCOUNT_NAME> --resource-group <RESOURCE_GROUP_NAME> --allow-blob-public-access false",
"NativeIaC": "```bicep\n// Set storage account to disallow public blob access\nresource sa 'Microsoft.Storage/storageAccounts@2023-01-01' = {\n name: '<example_resource_name>'\n location: resourceGroup().location\n sku: { name: 'Standard_LRS' }\n kind: 'StorageV2'\n properties: {\n allowBlobPublicAccess: false // Critical: disables public access at the account level\n }\n}\n```",
"Other": "1. In Azure Portal, go to the storage account used by the diagnostic/Activity Log export\n2. Under Settings, select Configuration\n3. Set \"Allow Blob public access\" to Disabled\n4. Click Save",
"Terraform": "```hcl\n# Disable public blob access on the storage account\nresource \"azurerm_storage_account\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n location = \"<example_resource_name>\"\n account_tier = \"Standard\"\n account_replication_type = \"LRS\"\n\n allow_blob_public_access = false # Critical: disables public access at the account level\n}\n```"
},
"Recommendation": {
"Text": "1. From Azure Home select the Portal Menu 2. Search for Storage Accounts to access Storage account blade 3. Click on the storage account name 4. Click on Configuration under settings 5. Select Enabled under 'Allow Blob public access'",
"Url": "https://docs.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-network-security#ns-2-secure-cloud-services-with-network-controls"
"Text": "Set `AllowBlobPublicAccess=false` on the storage account holding logs. Enforce **least privilege** via RBAC or scoped SAS, use **private endpoints** and network restrictions, and enable **immutability** for log containers to add **defense in depth** and prevent unauthorized access.",
"Url": "https://hub.prowler.com/check/monitor_storage_account_with_activity_logs_is_private"
}
},
"Categories": [],
"Categories": [
"internet-exposed",
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "Configuring container Access policy to private will remove access from the container for everyone except owners of the storage account. Access policy needs to be set explicitly in order to allow access to other desired users."
+31
View File
@@ -1,3 +1,4 @@
import os
import sys
from argparse import Namespace
from importlib import import_module
@@ -70,3 +71,33 @@ def validate_asff_usage(
False,
f"json-asff output format is only available for the aws provider, but {provider} was selected",
)
def validate_elasticsearch_arguments(arguments: Namespace) -> tuple[bool, str]:
"""Validate Elasticsearch-related arguments."""
if getattr(arguments, "elasticsearch", False):
es_url = getattr(arguments, "elasticsearch_url", None) or os.environ.get(
"ELASTICSEARCH_URL"
)
if not es_url:
return (
False,
"Elasticsearch URL is required when --elasticsearch is set (use --elasticsearch-url or ELASTICSEARCH_URL env var)",
)
api_key = getattr(arguments, "elasticsearch_api_key", None) or os.environ.get(
"ELASTICSEARCH_API_KEY"
)
username = getattr(arguments, "elasticsearch_username", None) or os.environ.get(
"ELASTICSEARCH_USERNAME"
)
password = getattr(arguments, "elasticsearch_password", None) or os.environ.get(
"ELASTICSEARCH_PASSWORD"
)
if not api_key and not (username and password):
return (
False,
"Elasticsearch requires either --elasticsearch-api-key or both --elasticsearch-username and --elasticsearch-password",
)
return (True, "")
+32
View File
@@ -1,3 +1,4 @@
import os
from dataclasses import dataclass
from os import makedirs
from os.path import isdir
@@ -26,6 +27,15 @@ class ProviderOutputOptions:
output_filename: str
only_logs: bool
unix_timestamp: bool
# Elasticsearch integration options
elasticsearch_enabled: bool
elasticsearch_url: str
elasticsearch_index: str
elasticsearch_api_key: str
elasticsearch_username: str
elasticsearch_password: str
elasticsearch_skip_tls_verify: bool
send_es_only_fails: bool
def __init__(self, arguments, bulk_checks_metadata):
self.status = getattr(arguments, "status", None)
@@ -38,6 +48,28 @@ class ProviderOutputOptions:
self.shodan_api_key = getattr(arguments, "shodan", None)
self.fixer = getattr(arguments, "fixer", None)
# Elasticsearch integration options
self.elasticsearch_enabled = getattr(arguments, "elasticsearch", False)
self.elasticsearch_url = getattr(
arguments, "elasticsearch_url", None
) or os.environ.get("ELASTICSEARCH_URL")
self.elasticsearch_index = getattr(
arguments, "elasticsearch_index", "prowler-findings"
)
self.elasticsearch_api_key = getattr(
arguments, "elasticsearch_api_key", None
) or os.environ.get("ELASTICSEARCH_API_KEY")
self.elasticsearch_username = getattr(
arguments, "elasticsearch_username", None
) or os.environ.get("ELASTICSEARCH_USERNAME")
self.elasticsearch_password = getattr(
arguments, "elasticsearch_password", None
) or os.environ.get("ELASTICSEARCH_PASSWORD")
self.elasticsearch_skip_tls_verify = getattr(
arguments, "elasticsearch_skip_tls_verify", False
)
self.send_es_only_fails = getattr(arguments, "send_es_only_fails", False)
# Shodan API Key
if self.shodan_api_key:
# TODO: revisit this logic
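The `getattr(...) or os.environ.get(...)` chains above give explicit CLI flags precedence over environment variables; a self-contained sketch of that precedence rule (values are illustrative):
```python
import os
from argparse import Namespace

os.environ["ELASTICSEARCH_URL"] = "https://es.internal:9200"

# No flag passed: the environment variable fills the gap
args = Namespace(elasticsearch_url=None)
url = getattr(args, "elasticsearch_url", None) or os.environ.get("ELASTICSEARCH_URL")
assert url == "https://es.internal:9200"

# Explicit flag passed: it wins over the environment
args = Namespace(elasticsearch_url="https://localhost:9200")
url = getattr(args, "elasticsearch_url", None) or os.environ.get("ELASTICSEARCH_URL")
assert url == "https://localhost:9200"
```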
+1 -1
View File
@@ -92,7 +92,7 @@ maintainers = [{name = "Prowler Engineering", email = "engineering@prowler.com"}
name = "prowler"
readme = "README.md"
requires-python = ">3.9.1,<3.13"
version = "5.18.0"
version = "5.19.0"
[project.scripts]
prowler = "prowler.__main__:prowler"
+1
View File
@@ -77,6 +77,7 @@ Patterns tailored for Prowler development:
| `prowler-provider` | Add new cloud providers |
| `prowler-pr` | Pull request conventions |
| `prowler-docs` | Documentation style guide |
| `prowler-attack-paths-query` | Create Attack Paths openCypher queries |
### Meta Skills
+497
View File
@@ -0,0 +1,497 @@
---
name: prowler-attack-paths-query
description: >
Creates Prowler Attack Paths openCypher queries for graph analysis (compatible with Neo4j and Neptune).
Trigger: When creating or updating Attack Paths queries that detect privilege escalation paths,
network exposure, or security misconfigurations in cloud environments.
license: Apache-2.0
metadata:
author: prowler-cloud
version: "1.0"
scope: [root, api]
auto_invoke:
- "Creating Attack Paths queries"
- "Updating existing Attack Paths queries"
- "Adding privilege escalation detection queries"
allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, Task
---
## Overview
Attack Paths queries are openCypher queries that analyze cloud infrastructure graphs (ingested via Cartography) to detect security risks like privilege escalation paths, network exposure, and misconfigurations.
Queries are written in **openCypher Version 9** to ensure compatibility with both Neo4j and Amazon Neptune.
---
## Input Sources
Queries can be created from:
1. **pathfinding.cloud ID** (e.g., `ECS-001`, `GLUE-001`)
- The JSON index contains: `id`, `name`, `description`, `services`, `permissions`, `exploitationSteps`, `prerequisites`, etc.
- Reference: https://github.com/DataDog/pathfinding.cloud
**Fetching a single path by ID** - The aggregated `paths.json` is too large for WebFetch
(content gets truncated). Use Bash with `curl` and a JSON parser instead:
Prefer `jq` (concise), fall back to `python3` (guaranteed in this Python project):
```bash
# With jq
curl -s https://raw.githubusercontent.com/DataDog/pathfinding.cloud/main/docs/paths.json \
| jq '.[] | select(.id == "ecs-002")'
# With python3 (fallback)
curl -s https://raw.githubusercontent.com/DataDog/pathfinding.cloud/main/docs/paths.json \
| python3 -c "import json,sys; print(json.dumps(next((p for p in json.load(sys.stdin) if p['id']=='ecs-002'), None), indent=2))"
```
2. **Listing Available Attack Paths**
- Use Bash to list available paths from the JSON index:
```bash
# List all path IDs and names (jq)
curl -s https://raw.githubusercontent.com/DataDog/pathfinding.cloud/main/docs/paths.json \
| jq -r '.[] | "\(.id): \(.name)"'
# List all path IDs and names (python3 fallback)
curl -s https://raw.githubusercontent.com/DataDog/pathfinding.cloud/main/docs/paths.json \
| python3 -c "import json,sys; [print(f\"{p['id']}: {p['name']}\") for p in json.load(sys.stdin)]"
# List paths filtered by service prefix
curl -s https://raw.githubusercontent.com/DataDog/pathfinding.cloud/main/docs/paths.json \
| jq -r '.[] | select(.id | startswith("ecs")) | "\(.id): \(.name)"'
```
3. **Natural Language Description**
- The user describes the attack path in plain language
- The agent maps it to appropriate openCypher patterns
---
## Query Structure
### File Location
```
api/src/backend/api/attack_paths/queries/{provider}.py
```
Example: `api/src/backend/api/attack_paths/queries/aws.py`
### Query Definition Pattern
```python
from api.attack_paths.queries.types import (
AttackPathsQueryAttribution,
AttackPathsQueryDefinition,
AttackPathsQueryParameterDefinition,
)
from tasks.jobs.attack_paths.config import PROWLER_FINDING_LABEL
# {REFERENCE_ID} (e.g., EC2-001, GLUE-001)
AWS_{QUERY_NAME} = AttackPathsQueryDefinition(
id="aws-{kebab-case-name}",
name="{Human-friendly label} ({REFERENCE_ID})",
short_description="{Brief explanation of the attack, no technical permissions.}",
description="{Detailed description of the attack vector and impact.}",
attribution=AttackPathsQueryAttribution(
text="pathfinding.cloud - {REFERENCE_ID} - {permission1} + {permission2}",
link="https://pathfinding.cloud/paths/{reference_id_lowercase}",
),
provider="aws",
cypher=f"""
// Find principals with {permission1}
MATCH path_principal = (aws:AWSAccount {{id: $provider_uid}})--(principal:AWSPrincipal)--(policy:AWSPolicy)--(stmt:AWSPolicyStatement)
WHERE stmt.effect = 'Allow'
AND any(action IN stmt.action WHERE
toLower(action) = '{permission1_lowercase}'
OR toLower(action) = '{service}:*'
OR action = '*'
)
// Find {permission2}
MATCH (principal)--(policy2:AWSPolicy)--(stmt2:AWSPolicyStatement)
WHERE stmt2.effect = 'Allow'
AND any(action IN stmt2.action WHERE
toLower(action) = '{permission2_lowercase}'
OR toLower(action) = '{service2}:*'
OR action = '*'
)
// Find target resources (MUST chain from `aws` for provider isolation)
MATCH path_target = (aws)--(target_role:AWSRole)-[:TRUSTS_AWS_PRINCIPAL]->(:AWSPrincipal {{arn: '{service}.amazonaws.com'}})
WHERE any(resource IN stmt.resource WHERE
resource = '*'
OR target_role.arn CONTAINS resource
OR resource CONTAINS target_role.name
)
UNWIND nodes(path_principal) + nodes(path_target) as n
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {{status: 'FAIL', provider_uid: $provider_uid}})
RETURN path_principal, path_target,
collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
""",
parameters=[],
)
```
### Register in Query List
Add to the `{PROVIDER}_QUERIES` list at the bottom of the file:
```python
AWS_QUERIES: list[AttackPathsQueryDefinition] = [
# ... existing queries ...
AWS_{NEW_QUERY_NAME}, # Add here
]
```
---
## Step-by-Step Creation Process
### 1. Read the Queries Module
**FIRST**, read all files in the queries module to understand the structure:
```
api/src/backend/api/attack_paths/queries/
├── __init__.py # Module exports
├── types.py # AttackPathsQueryDefinition, AttackPathsQueryParameterDefinition
├── registry.py # Query registry logic
└── {provider}.py # Provider-specific queries (e.g., aws.py)
```
Read these files to learn:
- Type definitions and available fields
- How queries are registered
- Current query patterns, style, and naming conventions
### 2. Determine Schema Source
Check the Cartography dependency in `api/pyproject.toml`:
```bash
grep cartography api/pyproject.toml
```
Parse the dependency to determine the schema source:
**If git-based dependency** (e.g., `cartography @ git+https://github.com/prowler-cloud/cartography@0.126.1`):
- Extract the repository (e.g., `prowler-cloud/cartography`)
- Extract the version/tag (e.g., `0.126.1`)
- Fetch schema from that repository at that tag
**If PyPI dependency** (e.g., `cartography = "^0.126.0"` or `cartography>=0.126.0`):
- Extract the version (e.g., `0.126.0`)
- Use the official `cartography-cncf` repository
**Schema URL patterns** (ALWAYS use the specific version tag, not master/main):
```
# Official Cartography (cartography-cncf)
https://raw.githubusercontent.com/cartography-cncf/cartography/refs/tags/{version}/docs/root/modules/{provider}/schema.md
# Prowler fork (prowler-cloud)
https://raw.githubusercontent.com/prowler-cloud/cartography/refs/tags/{version}/docs/root/modules/{provider}/schema.md
```
**Examples**:
```bash
# For prowler-cloud/cartography@0.126.1 (git), fetch AWS schema:
https://raw.githubusercontent.com/prowler-cloud/cartography/refs/tags/0.126.1/docs/root/modules/aws/schema.md
# For cartography = "^0.126.0" (PyPI), fetch AWS schema:
https://raw.githubusercontent.com/cartography-cncf/cartography/refs/tags/0.126.0/docs/root/modules/aws/schema.md
```
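To make the two dependency cases concrete, a small helper sketch (the `schema_url` name and regexes are illustrative assumptions, not project code):
```python
import re

def schema_url(dependency: str, provider: str = "aws") -> str:
    """Derive the Cartography schema URL from a pyproject dependency string."""
    git = re.search(r"git\+https://github\.com/([\w.-]+/[\w.-]+)@([\w.]+)", dependency)
    if git:  # git-based dependency: use that repository at that tag
        repo, version = git.groups()
    else:  # PyPI dependency: official repository, extract the version number
        repo = "cartography-cncf/cartography"
        version = re.search(r"(\d+\.\d+\.\d+)", dependency).group(1)
    return (
        f"https://raw.githubusercontent.com/{repo}/refs/tags/{version}"
        f"/docs/root/modules/{provider}/schema.md"
    )

print(schema_url("cartography @ git+https://github.com/prowler-cloud/cartography@0.126.1"))
print(schema_url('cartography = "^0.126.0"'))
```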
**IMPORTANT**: Always match the schema version to the dependency version in `pyproject.toml`. Using master/main may reference node labels or properties that don't exist in the deployed version.
**Additional Prowler Labels**: The Attack Paths sync task adds extra labels:
- `ProwlerFinding` - Prowler finding nodes with `status`, `provider_uid` properties
- `ProviderResource` - Generic resource marker
- `{Provider}Resource` - Provider-specific marker (e.g., `AWSResource`)
These are defined in `api/src/backend/tasks/jobs/attack_paths/config.py`.
### 3. Consult the Schema for Available Data
Use the Cartography schema to discover:
- What node labels exist for the target resources
- What properties are available on those nodes
- What relationships connect the nodes
This informs query design by showing what data is actually available to query.
### 4. Create Query Definition
Use the standard pattern (see above) with:
- **id**: Auto-generated as `{provider}-{kebab-case-description}`
- **name**: Short, human-friendly label. No raw IAM permissions. For sourced queries (e.g., pathfinding.cloud), append the reference ID in parentheses: `"EC2 Instance Launch with Privileged Role (EC2-001)"`. If the name already has parentheses, prepend the ID inside them: `"ECS Service Creation with Privileged Role (ECS-003 - Existing Cluster)"`.
- **short_description**: Brief explanation of the attack, no technical permissions. E.g., "Launch EC2 instances with privileged IAM roles to gain their permissions via IMDS."
- **description**: Full technical explanation of the attack vector and impact. Plain text only, no HTML or technical permissions here.
- **provider**: Provider identifier (aws, azure, gcp, kubernetes, github)
- **cypher**: The openCypher query with proper escaping
- **parameters**: Optional list of user-provided parameters (use `parameters=[]` if none needed)
- **attribution**: Optional `AttackPathsQueryAttribution(text, link)` for sourced queries. The `text` includes the source, reference ID, and technical permissions (e.g., `"pathfinding.cloud - EC2-001 - iam:PassRole + ec2:RunInstances"`). The `link` is the URL with a lowercase ID (e.g., `"https://pathfinding.cloud/paths/ec2-001"`). Omit (defaults to `None`) for non-sourced queries.
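Putting these field rules together, a compact illustrative instance (the cypher body is elided and the description text is invented for the example; follow the Query Definition Pattern above for the real query):
```python
from api.attack_paths.queries.types import (
    AttackPathsQueryAttribution,
    AttackPathsQueryDefinition,
)

AWS_EC2_PRIVESC_PASSROLE_RUNINSTANCES = AttackPathsQueryDefinition(
    id="aws-ec2-privesc-passrole-runinstances",
    name="EC2 Instance Launch with Privileged Role (EC2-001)",
    short_description="Launch EC2 instances with privileged IAM roles to gain their permissions via IMDS.",
    description="Principals allowed to pass a privileged role and launch instances can harvest that role's credentials from the instance metadata service.",
    attribution=AttackPathsQueryAttribution(
        text="pathfinding.cloud - EC2-001 - iam:PassRole + ec2:RunInstances",
        link="https://pathfinding.cloud/paths/ec2-001",
    ),
    provider="aws",
    cypher="...",  # full openCypher Version 9 query goes here
    parameters=[],
)
```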
### 5. Add Query to Provider List
Add the constant to the `{PROVIDER}_QUERIES` list.
---
## Query Naming Conventions
### Query ID
```
{provider}-{category}-{description}
```
Examples:
- `aws-ec2-privesc-passrole-iam`
- `aws-iam-privesc-attach-role-policy-assume-role`
- `aws-rds-unencrypted-storage`
### Query Constant Name
```
{PROVIDER}_{CATEGORY}_{DESCRIPTION}
```
Examples:
- `AWS_EC2_PRIVESC_PASSROLE_IAM`
- `AWS_IAM_PRIVESC_ATTACH_ROLE_POLICY_ASSUME_ROLE`
- `AWS_RDS_UNENCRYPTED_STORAGE`
---
## Query Categories
| Category | Description | Example |
| -------------------- | ------------------------------ | ------------------------- |
| Basic Resource | List resources with properties | RDS instances, S3 buckets |
| Network Exposure | Internet-exposed resources | EC2 with public IPs |
| Privilege Escalation | IAM privilege escalation paths | PassRole + RunInstances |
| Data Access | Access to sensitive data | EC2 with S3 access |
---
## Common openCypher Patterns
### Match Account and Principal
```cypher
MATCH path_principal = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)--(policy:AWSPolicy)--(stmt:AWSPolicyStatement)
```
### Check IAM Action Permissions
```cypher
WHERE stmt.effect = 'Allow'
AND any(action IN stmt.action WHERE
toLower(action) = 'iam:passrole'
OR toLower(action) = 'iam:*'
OR action = '*'
)
```
### Find Roles Trusting a Service
```cypher
MATCH path_target = (aws)--(target_role:AWSRole)-[:TRUSTS_AWS_PRINCIPAL]->(:AWSPrincipal {arn: 'ec2.amazonaws.com'})
```
### Check Resource Scope
```cypher
WHERE any(resource IN stmt.resource WHERE
resource = '*'
OR target_role.arn CONTAINS resource
OR resource CONTAINS target_role.name
)
```
### Include Prowler Findings
```cypher
UNWIND nodes(path_principal) + nodes(path_target) as n
OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL} {status: 'FAIL', provider_uid: $provider_uid})
RETURN path_principal, path_target,
collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
```
---
## Common Node Labels by Provider
### AWS
| Label | Description |
| -------------------- | ----------------------------------- |
| `AWSAccount` | AWS account root |
| `AWSPrincipal` | IAM principal (user, role, service) |
| `AWSRole` | IAM role |
| `AWSUser` | IAM user |
| `AWSPolicy` | IAM policy |
| `AWSPolicyStatement` | Policy statement |
| `EC2Instance` | EC2 instance |
| `EC2SecurityGroup` | Security group |
| `S3Bucket` | S3 bucket |
| `RDSInstance` | RDS database instance |
| `LoadBalancer` | Classic ELB |
| `LoadBalancerV2` | ALB/NLB |
| `LaunchTemplate` | EC2 launch template |
### Common Relationships
| Relationship | Description |
| ---------------------- | ----------------------- |
| `TRUSTS_AWS_PRINCIPAL` | Role trust relationship |
| `STS_ASSUMEROLE_ALLOW` | Can assume role |
| `POLICY` | Has policy attached |
| `STATEMENT` | Policy has statement |
---
## Parameters
For queries requiring user input, define parameters:
```python
parameters=[
AttackPathsQueryParameterDefinition(
name="ip",
label="IP address",
description="Public IP address, e.g. 192.0.2.0.",
placeholder="192.0.2.0",
),
AttackPathsQueryParameterDefinition(
name="tag_key",
label="Tag key",
description="Tag key to filter resources.",
placeholder="Environment",
),
],
```
---
## Best Practices
1. **Always filter by provider_uid**: Use `{id: $provider_uid}` on account nodes and `{provider_uid: $provider_uid}` on ProwlerFinding nodes
2. **Use consistent naming**: Follow existing patterns in the file
3. **Include Prowler findings**: Always add the OPTIONAL MATCH for ProwlerFinding nodes
4. **Return distinct findings**: Use `collect(DISTINCT pf)` to avoid duplicates
5. **Comment the query purpose**: Add inline comments explaining each MATCH clause
6. **Validate schema first**: Ensure all node labels and properties exist in Cartography schema
7. **Chain all MATCHes from the root account node**: Every `MATCH` clause must connect to the `aws` variable (or another variable already bound to the account's subgraph). The tenant database contains data from multiple providers — an unanchored `MATCH` would return nodes from all providers, breaking provider isolation.
```cypher
// WRONG: matches ALL AWSRoles across all providers in the tenant DB
MATCH (role:AWSRole) WHERE role.name = 'admin'
// CORRECT: scoped to the specific account's subgraph
MATCH (aws)--(role:AWSRole) WHERE role.name = 'admin'
```
---
## openCypher Compatibility
Queries must be written in **openCypher Version 9** to ensure compatibility with both Neo4j and Amazon Neptune.
> **Why Version 9?** Amazon Neptune implements openCypher Version 9. By targeting this specification, queries work on both Neo4j and Neptune without modification.
### Avoid These (Not in openCypher spec)
| Feature | Reason |
| --------------------------------------------------- | ----------------------------------------------- |
| APOC procedures (`apoc.*`) | Neo4j-specific plugin, not available in Neptune |
| Virtual nodes (`apoc.create.vNode`) | APOC-specific |
| Virtual relationships (`apoc.create.vRelationship`) | APOC-specific |
| Neptune extensions | Not available in Neo4j |
| `reduce()` function | Use `UNWIND` + aggregation instead |
| `FOREACH` clause | Use `WITH` + `UNWIND` + `SET` instead |
| Regex match operator (`=~`) | Not supported in Neptune |
### CALL Subqueries
Supported with limitations:
- Use `WITH` clause to import variables: `CALL { WITH var ... }`
- Updates inside CALL subqueries are NOT supported
- Emitted variables cannot overlap with variables before the CALL
---
## Reference
### pathfinding.cloud (Attack Path Definitions)
- **Repository**: https://github.com/DataDog/pathfinding.cloud
- **All paths JSON**: `https://raw.githubusercontent.com/DataDog/pathfinding.cloud/main/docs/paths.json`
- Use Bash with `curl` plus `jq`/`python3` to query specific paths or list available services (the aggregated JSON is too large for WebFetch; see Input Sources above)
### Cartography Schema
- **URL pattern**: `https://raw.githubusercontent.com/{org}/cartography/refs/tags/{version}/docs/root/modules/{provider}/schema.md`
- Always use the version from `api/pyproject.toml`, not master/main
### openCypher Specification
- **Neptune openCypher compliance** (what Neptune supports): https://docs.aws.amazon.com/neptune/latest/userguide/feature-opencypher-compliance.html
- **Rewriting Cypher for Neptune** (converting Neo4j-specific syntax): https://docs.aws.amazon.com/neptune/latest/userguide/migration-opencypher-rewrites.html
- **openCypher project** (spec, grammar, TCK): https://github.com/opencypher/openCypher
---
## Learning from the Queries Module
**IMPORTANT**: Before creating a new query, ALWAYS read the entire queries module:
```
api/src/backend/api/attack_paths/queries/
├── __init__.py # Module exports
├── types.py # Type definitions
├── registry.py # Registry logic
└── {provider}.py # Provider queries (aws.py, etc.)
```
Use the existing queries to learn:
- Query structure and formatting
- Variable naming conventions
- How to include Prowler findings
- Comment style
> **Compatibility Warning**: Some existing queries use Neo4j-specific features
> (e.g., `apoc.create.vNode`, `apoc.create.vRelationship`, regex `=~`) that are
> **NOT compatible** with Amazon Neptune. Use these queries to learn general
> patterns (structure, naming, Prowler findings integration, comment style) but
> **DO NOT copy APOC procedures or other Neo4j-specific syntax** into new queries.
> New queries must be pure openCypher Version 9. Refer to the
> [openCypher Compatibility](#opencypher-compatibility) section for the full list
> of features to avoid.
**DO NOT** use generic templates. Match the exact style of existing **compatible** queries in the file.
View File
@@ -0,0 +1,216 @@
from argparse import Namespace
from unittest.mock import patch
from prowler.providers.common.arguments import validate_elasticsearch_arguments
class TestValidateElasticsearchArguments:
def test_elasticsearch_disabled(self):
args = Namespace(elasticsearch=False)
valid, error = validate_elasticsearch_arguments(args)
assert valid is True
assert error == ""
def test_elasticsearch_no_flag(self):
args = Namespace()
valid, error = validate_elasticsearch_arguments(args)
assert valid is True
assert error == ""
def test_elasticsearch_enabled_with_url_and_api_key(self):
args = Namespace(
elasticsearch=True,
elasticsearch_url="https://localhost:9200",
elasticsearch_api_key="test-key",
elasticsearch_username=None,
elasticsearch_password=None,
)
valid, error = validate_elasticsearch_arguments(args)
assert valid is True
assert error == ""
def test_elasticsearch_enabled_with_url_and_basic_auth(self):
args = Namespace(
elasticsearch=True,
elasticsearch_url="https://localhost:9200",
elasticsearch_api_key=None,
elasticsearch_username="elastic",
elasticsearch_password="changeme",
)
valid, error = validate_elasticsearch_arguments(args)
assert valid is True
assert error == ""
def test_elasticsearch_enabled_no_url(self):
args = Namespace(
elasticsearch=True,
elasticsearch_url=None,
elasticsearch_api_key="test-key",
elasticsearch_username=None,
elasticsearch_password=None,
)
with patch.dict("os.environ", {}, clear=True):
valid, error = validate_elasticsearch_arguments(args)
assert valid is False
assert "URL is required" in error
def test_elasticsearch_enabled_no_auth(self):
args = Namespace(
elasticsearch=True,
elasticsearch_url="https://localhost:9200",
elasticsearch_api_key=None,
elasticsearch_username=None,
elasticsearch_password=None,
)
with patch.dict("os.environ", {}, clear=True):
valid, error = validate_elasticsearch_arguments(args)
assert valid is False
assert "requires either" in error
def test_elasticsearch_enabled_username_without_password(self):
args = Namespace(
elasticsearch=True,
elasticsearch_url="https://localhost:9200",
elasticsearch_api_key=None,
elasticsearch_username="elastic",
elasticsearch_password=None,
)
with patch.dict("os.environ", {}, clear=True):
valid, error = validate_elasticsearch_arguments(args)
assert valid is False
assert "requires either" in error
def test_elasticsearch_url_from_env(self):
args = Namespace(
elasticsearch=True,
elasticsearch_url=None,
elasticsearch_api_key="test-key",
elasticsearch_username=None,
elasticsearch_password=None,
)
with patch.dict(
"os.environ", {"ELASTICSEARCH_URL": "https://localhost:9200"}, clear=False
):
valid, error = validate_elasticsearch_arguments(args)
assert valid is True
def test_elasticsearch_api_key_from_env(self):
args = Namespace(
elasticsearch=True,
elasticsearch_url="https://localhost:9200",
elasticsearch_api_key=None,
elasticsearch_username=None,
elasticsearch_password=None,
)
with patch.dict(
"os.environ", {"ELASTICSEARCH_API_KEY": "env-key"}, clear=False
):
valid, error = validate_elasticsearch_arguments(args)
assert valid is True
class TestElasticsearchParserArgs:
def setup_method(self):
self.patch_get_available_providers = patch(
"prowler.providers.common.provider.Provider.get_available_providers",
new=lambda: [
"aws",
"azure",
"gcp",
"kubernetes",
"m365",
"github",
"iac",
"nhn",
"mongodbatlas",
"oraclecloud",
"alibabacloud",
"cloudflare",
"openstack",
],
)
self.patch_get_available_providers.start()
from prowler.lib.cli.parser import ProwlerArgumentParser
self.parser = ProwlerArgumentParser()
def teardown_method(self):
self.patch_get_available_providers.stop()
def test_elasticsearch_flag(self):
command = [
"prowler",
"aws",
"--elasticsearch",
"--elasticsearch-url",
"https://localhost:9200",
"--elasticsearch-api-key",
"key",
]
parsed = self.parser.parse(command)
assert parsed.elasticsearch is True
def test_elasticsearch_default_index(self):
command = [
"prowler",
"aws",
"--elasticsearch",
"--elasticsearch-url",
"https://localhost:9200",
"--elasticsearch-api-key",
"key",
]
parsed = self.parser.parse(command)
assert parsed.elasticsearch_index == "prowler-findings"
def test_elasticsearch_custom_index(self):
command = [
"prowler",
"aws",
"--elasticsearch",
"--elasticsearch-url",
"https://localhost:9200",
"--elasticsearch-api-key",
"key",
"--elasticsearch-index",
"custom-index",
]
parsed = self.parser.parse(command)
assert parsed.elasticsearch_index == "custom-index"
def test_elasticsearch_skip_tls_verify(self):
command = [
"prowler",
"aws",
"--elasticsearch",
"--elasticsearch-url",
"https://localhost:9200",
"--elasticsearch-api-key",
"key",
"--elasticsearch-skip-tls-verify",
]
parsed = self.parser.parse(command)
assert parsed.elasticsearch_skip_tls_verify is True
def test_send_es_only_fails(self):
command = [
"prowler",
"aws",
"--elasticsearch",
"--elasticsearch-url",
"https://localhost:9200",
"--elasticsearch-api-key",
"key",
"--send-es-only-fails",
]
parsed = self.parser.parse(command)
assert parsed.send_es_only_fails is True
def test_elasticsearch_defaults_off(self):
command = ["prowler", "aws"]
parsed = self.parser.parse(command)
assert parsed.elasticsearch is False
assert parsed.elasticsearch_url is None
assert parsed.elasticsearch_skip_tls_verify is False
assert parsed.send_es_only_fails is False
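These tests pin down the flag names and defaults; for orientation, a minimal argparse sketch consistent with the asserted behavior (the real wiring lives in `prowler.lib.cli.parser`, and the grouping here is an assumption):
```python
import argparse

parser = argparse.ArgumentParser(prog="prowler")
es = parser.add_argument_group("Elasticsearch integration")
es.add_argument("--elasticsearch", action="store_true")
es.add_argument("--elasticsearch-url", default=None)
es.add_argument("--elasticsearch-index", default="prowler-findings")
es.add_argument("--elasticsearch-api-key", default=None)
es.add_argument("--elasticsearch-username", default=None)
es.add_argument("--elasticsearch-password", default=None)
es.add_argument("--elasticsearch-skip-tls-verify", action="store_true")
es.add_argument("--send-es-only-fails", action="store_true")

args = parser.parse_args(["--elasticsearch", "--elasticsearch-url", "https://localhost:9200"])
assert args.elasticsearch_index == "prowler-findings"  # default matches the tests
assert args.elasticsearch_skip_tls_verify is False
```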
@@ -0,0 +1,511 @@
import base64
import json
from datetime import date, datetime
from unittest.mock import MagicMock, patch
import pytest
import requests
from prowler.lib.integrations.elasticsearch.elasticsearch import (
ELASTICSEARCH_MAX_BATCH,
Elasticsearch,
ElasticsearchConnection,
_json_serial,
)
from prowler.lib.integrations.elasticsearch.exceptions.exceptions import (
ElasticsearchConnectionError,
ElasticsearchIndexError,
)
ES_URL = "https://localhost:9200"
ES_INDEX = "prowler-findings"
ES_API_KEY = "test-api-key"
ES_USERNAME = "elastic"
ES_PASSWORD = "changeme"
def _make_finding(status_code="FAIL", uid="finding-1"):
return {
"status_code": status_code,
"finding_info": {"uid": uid, "title": "Test finding"},
"severity": "HIGH",
}
class TestJsonSerial:
def test_datetime_serialization(self):
dt = datetime(2024, 1, 15, 10, 30, 0)
assert _json_serial(dt) == "2024-01-15T10:30:00"
def test_date_serialization(self):
d = date(2024, 1, 15)
assert _json_serial(d) == "2024-01-15"
def test_set_serialization(self):
s = {1, 2, 3}
result = _json_serial(s)
assert isinstance(result, list)
assert sorted(result) == [1, 2, 3]
def test_unsupported_type_raises(self):
with pytest.raises(TypeError, match="not JSON serializable"):
_json_serial(object())
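For reference, a minimal `_json_serial` implementation consistent with these assertions (the shipped helper may differ in details):
```python
from datetime import date, datetime

def _json_serial(obj):
    # datetimes and dates become ISO-8601 strings; sets become lists;
    # anything else raises TypeError, matching the test expectations above
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    if isinstance(obj, set):
        return list(obj)
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
```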
class TestElasticsearchInit:
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_init_with_api_key(self, mock_session):
mock_session.return_value = MagicMock()
es = Elasticsearch(
url=ES_URL,
index=ES_INDEX,
api_key=ES_API_KEY,
)
assert es._url == ES_URL
assert es._index == ES_INDEX
assert es._api_key == ES_API_KEY
assert es._username is None
assert es._password is None
assert es._skip_tls_verify is False
assert es._send_only_fails is False
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_init_with_basic_auth(self, mock_session):
mock_session.return_value = MagicMock()
es = Elasticsearch(
url=ES_URL,
index=ES_INDEX,
username=ES_USERNAME,
password=ES_PASSWORD,
)
assert es._username == ES_USERNAME
assert es._password == ES_PASSWORD
assert es._api_key is None
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_init_url_trailing_slash_stripped(self, mock_session):
mock_session.return_value = MagicMock()
es = Elasticsearch(
url="https://localhost:9200/",
index=ES_INDEX,
api_key=ES_API_KEY,
)
assert es._url == "https://localhost:9200"
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_init_empty_findings_default(self, mock_session):
mock_session.return_value = MagicMock()
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
assert es._findings == []
class TestFilterFindings:
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_filter_findings_send_only_fails(self, mock_session):
mock_session.return_value = MagicMock()
findings = [
_make_finding("FAIL", "f1"),
_make_finding("PASS", "f2"),
_make_finding("FAIL", "f3"),
]
es = Elasticsearch(
url=ES_URL,
index=ES_INDEX,
api_key=ES_API_KEY,
findings=findings,
send_only_fails=True,
)
assert len(es._findings) == 2
assert all(f["status_code"] == "FAIL" for f in es._findings)
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_filter_findings_send_all(self, mock_session):
mock_session.return_value = MagicMock()
findings = [
_make_finding("FAIL", "f1"),
_make_finding("PASS", "f2"),
]
es = Elasticsearch(
url=ES_URL,
index=ES_INDEX,
api_key=ES_API_KEY,
findings=findings,
send_only_fails=False,
)
assert len(es._findings) == 2
class TestCreateSession:
def test_create_session_api_key_auth(self):
es = Elasticsearch(
url=ES_URL,
index=ES_INDEX,
api_key=ES_API_KEY,
)
assert es._session.headers["Authorization"] == f"ApiKey {ES_API_KEY}"
assert es._session.headers["Content-Type"] == "application/json"
assert es._session.verify is True
def test_create_session_basic_auth(self):
es = Elasticsearch(
url=ES_URL,
index=ES_INDEX,
username=ES_USERNAME,
password=ES_PASSWORD,
)
expected_creds = base64.b64encode(
f"{ES_USERNAME}:{ES_PASSWORD}".encode()
).decode()
assert es._session.headers["Authorization"] == f"Basic {expected_creds}"
def test_create_session_skip_tls(self):
es = Elasticsearch(
url=ES_URL,
index=ES_INDEX,
api_key=ES_API_KEY,
skip_tls_verify=True,
)
assert es._session.verify is False
def test_create_session_no_auth(self):
es = Elasticsearch(
url=ES_URL,
index=ES_INDEX,
)
assert "Authorization" not in es._session.headers
class TestTestConnection:
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_connection_success(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 200
mock_session.get.return_value = mock_response
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
result = es.test_connection()
assert isinstance(result, ElasticsearchConnection)
assert result.connected is True
assert result.error_message == ""
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_connection_auth_failure(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 401
mock_response.text = "Unauthorized"
mock_session.get.return_value = mock_response
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key="bad-key")
result = es.test_connection()
assert result.connected is False
assert "Authentication failed" in result.error_message
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_connection_error(self, mock_create_session):
mock_session = MagicMock()
mock_session.get.side_effect = requests.exceptions.ConnectionError(
"Connection refused"
)
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
result = es.test_connection()
assert result.connected is False
assert "Could not connect" in result.error_message
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_connection_ssl_error(self, mock_create_session):
mock_session = MagicMock()
mock_session.get.side_effect = requests.exceptions.SSLError("SSL error")
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
result = es.test_connection()
assert result.connected is False
assert "SSL/TLS error" in result.error_message
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_connection_timeout(self, mock_create_session):
mock_session = MagicMock()
mock_session.get.side_effect = requests.exceptions.Timeout("Timed out")
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
result = es.test_connection()
assert result.connected is False
assert "timed out" in result.error_message
class TestCreateIndexIfNotExists:
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_index_already_exists(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 200
mock_session.head.return_value = mock_response
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
result = es.create_index_if_not_exists()
assert result is True
mock_session.put.assert_not_called()
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_index_created_successfully(self, mock_create_session):
mock_session = MagicMock()
# HEAD returns 404 (index doesn't exist)
head_response = MagicMock()
head_response.status_code = 404
mock_session.head.return_value = head_response
# PUT creates the index
put_response = MagicMock()
put_response.status_code = 200
mock_session.put.return_value = put_response
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
result = es.create_index_if_not_exists()
assert result is True
mock_session.put.assert_called_once()
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_index_creation_fails(self, mock_create_session):
mock_session = MagicMock()
head_response = MagicMock()
head_response.status_code = 404
mock_session.head.return_value = head_response
put_response = MagicMock()
put_response.status_code = 400
put_response.text = "Bad request"
mock_session.put.return_value = put_response
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
result = es.create_index_if_not_exists()
assert result is False
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_index_creation_exception(self, mock_create_session):
mock_session = MagicMock()
mock_session.head.side_effect = Exception("Network error")
mock_create_session.return_value = mock_session
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
with pytest.raises(ElasticsearchIndexError):
es.create_index_if_not_exists()
class TestBatchSendToElasticsearch:
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_no_findings(self, mock_create_session):
mock_create_session.return_value = MagicMock()
es = Elasticsearch(url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY)
result = es.batch_send_to_elasticsearch()
assert result == 0
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_send_findings_success(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"errors": False, "items": []}
mock_session.post.return_value = mock_response
mock_create_session.return_value = mock_session
findings = [_make_finding("FAIL", f"f{i}") for i in range(3)]
es = Elasticsearch(
url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY, findings=findings
)
result = es.batch_send_to_elasticsearch()
assert result == 3
mock_session.post.assert_called_once()
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_send_findings_partial_failure(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {
"errors": True,
"items": [
{"index": {"status": 201}},
{"index": {"status": 400}},
{"index": {"status": 201}},
],
}
mock_session.post.return_value = mock_response
mock_create_session.return_value = mock_session
findings = [_make_finding("FAIL", f"f{i}") for i in range(3)]
es = Elasticsearch(
url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY, findings=findings
)
result = es.batch_send_to_elasticsearch()
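# Only the two 201 items count as indexed; the 400 item is not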
assert result == 2
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_send_findings_bulk_request_failure(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 500
mock_response.text = "Internal Server Error"
mock_session.post.return_value = mock_response
mock_create_session.return_value = mock_session
findings = [_make_finding("FAIL", "f1")]
es = Elasticsearch(
url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY, findings=findings
)
result = es.batch_send_to_elasticsearch()
assert result == 0
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_send_findings_connection_error(self, mock_create_session):
mock_session = MagicMock()
mock_session.post.side_effect = Exception("Connection lost")
mock_create_session.return_value = mock_session
findings = [_make_finding("FAIL", "f1")]
es = Elasticsearch(
url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY, findings=findings
)
with pytest.raises(ElasticsearchConnectionError):
es.batch_send_to_elasticsearch()
class TestSendFindingsInBatches:
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_batching_with_more_than_max_batch(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"errors": False, "items": []}
mock_session.post.return_value = mock_response
mock_create_session.return_value = mock_session
# Create more findings than ELASTICSEARCH_MAX_BATCH
findings = [
_make_finding("FAIL", f"f{i}") for i in range(ELASTICSEARCH_MAX_BATCH + 10)
]
es = Elasticsearch(
url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY, findings=findings
)
result = es.batch_send_to_elasticsearch()
# Should have been called twice (one full batch + one partial)
assert mock_session.post.call_count == 2
assert result == ELASTICSEARCH_MAX_BATCH + 10
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_finding_without_uid(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"errors": False, "items": []}
mock_session.post.return_value = mock_response
mock_create_session.return_value = mock_session
findings = [{"status_code": "FAIL", "severity": "HIGH"}]
es = Elasticsearch(
url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY, findings=findings
)
result = es.batch_send_to_elasticsearch()
assert result == 1
# Verify the bulk body doesn't include _id
call_args = mock_session.post.call_args
body = call_args.kwargs.get("data") or call_args[1].get("data")
lines = body.strip().split("\n")
action = json.loads(lines[0])
assert "_id" not in action["index"]
@patch(
"prowler.lib.integrations.elasticsearch.elasticsearch.Elasticsearch._create_session"
)
def test_finding_with_datetime_serialization(self, mock_create_session):
mock_session = MagicMock()
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"errors": False, "items": []}
mock_session.post.return_value = mock_response
mock_create_session.return_value = mock_session
findings = [
{
"status_code": "FAIL",
"time_dt": datetime(2024, 1, 15, 10, 0, 0),
"finding_info": {"uid": "f1"},
}
]
es = Elasticsearch(
url=ES_URL, index=ES_INDEX, api_key=ES_API_KEY, findings=findings
)
result = es.batch_send_to_elasticsearch()
assert result == 1
call_args = mock_session.post.call_args
body = call_args.kwargs.get("data") or call_args[1].get("data")
lines = body.strip().split("\n")
doc = json.loads(lines[1])
assert doc["time_dt"] == "2024-01-15T10:00:00"
+20
View File
@@ -2,6 +2,26 @@
All notable changes to the **Prowler UI** are documented in this file.
## [1.19.0] (Prowler UNRELEASED)
### 🔄 Changed
- Attack Paths: the query list now shows each query's name and short description; when a query is selected, its longer description and attribution (when available) are also shown [(#9983)](https://github.com/prowler-cloud/prowler/pull/9983)
### 🐞 Fixed
- ProviderTypeSelector crash when a provider type returned by the API is not present in PROVIDER_DATA [(#9991)](https://github.com/prowler-cloud/prowler/pull/9991)
---
## [1.18.1] (Prowler v5.18.1)
### 🐞 Fixed
- Scans page polling now refreshes only the scan table data instead of re-rendering the entire server component tree, eliminating redundant API calls to the providers, findings, and compliance endpoints every 5 seconds
---
## [1.18.0] (Prowler v5.18.0)
### 🔄 Changed
@@ -153,7 +153,7 @@ export const ProviderTypeSelector = ({
// .filter((p) => p.attributes.connection?.connected)
.map((p) => p.attributes.provider),
),
) as ProviderType[];
).filter((type): type is ProviderType => type in PROVIDER_DATA);
const renderIcon = (providerType: ProviderType) => {
const IconComponent = PROVIDER_DATA[providerType].icon;
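The replaced `as ProviderType[]` assertion told the compiler to trust the array unconditionally; the new filter uses a type predicate, so unknown provider strings coming back from the API are dropped at runtime before `PROVIDER_DATA[providerType]` is dereferenced in `renderIcon`. A self-contained sketch of the guard pattern, using stand-in values rather than the real Prowler UI exports:

```typescript
// Stand-in values; the real PROVIDER_DATA and ProviderType live in the Prowler UI.
const PROVIDER_DATA = { aws: {}, azure: {}, gcp: {} } as const;
type ProviderType = keyof typeof PROVIDER_DATA;

function knownProviderTypes(raw: string[]): ProviderType[] {
  // The `type is ProviderType` predicate narrows each element only when the
  // key really exists, so the icon lookup can never be indexed with an
  // unknown provider and crash.
  return raw.filter((type): type is ProviderType => type in PROVIDER_DATA);
}

knownProviderTypes(["aws", "not-a-provider"]); // => ["aws"]
```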
@@ -35,7 +35,7 @@ export const QuerySelector = ({
<div className="flex flex-col gap-1">
<span className="font-medium">{query.attributes.name}</span>
<span className="text-xs text-gray-500">
{query.attributes.description}
{query.attributes.short_description}
</span>
</div>
</SelectItem>
@@ -340,6 +340,33 @@ export default function AttackPathAnalysisPage() {
onQueryChange={handleQueryChange}
/>
{queryBuilder.selectedQueryData && (
<div className="bg-bg-neutral-tertiary text-text-neutral-secondary dark:text-text-neutral-secondary rounded-md p-3 text-sm">
<p className="whitespace-pre-line">
{queryBuilder.selectedQueryData.attributes.description}
</p>
{queryBuilder.selectedQueryData.attributes.attribution && (
<p className="mt-2 text-xs">
Source:{" "}
<a
href={
queryBuilder.selectedQueryData.attributes
.attribution.link
}
target="_blank"
rel="noopener noreferrer"
className="underline"
>
{
queryBuilder.selectedQueryData.attributes
.attribution.text
}
</a>
</p>
)}
</div>
)}
{queryBuilder.selectedQuery && (
<QueryParametersForm
selectedQuery={queryBuilder.selectedQueryData}
+16 -9
View File
@@ -13,7 +13,7 @@ import {
} from "@/components/providers/table";
import { ContentLayout } from "@/components/ui";
import { DataTable } from "@/components/ui/table";
import { ProviderProps, SearchParamsProps } from "@/types";
import { PROVIDER_TYPES, ProviderProps, SearchParamsProps } from "@/types";
export default async function Providers({
searchParams,
@@ -89,15 +89,22 @@ const ProvidersTable = async ({
return acc;
}, {}) || {};
// Exclude provider types not yet supported in the UI
const enrichedProviders =
providersData?.data?.map((provider: ProviderProps) => {
const groupNames =
provider.relationships?.provider_groups?.data?.map(
(group: { id: string }) =>
providerGroupDict[group.id] || "Unknown Group",
) || [];
return { ...provider, groupNames };
}) || [];
providersData?.data
?.filter((provider: ProviderProps) =>
(PROVIDER_TYPES as readonly string[]).includes(
provider.attributes.provider,
),
)
.map((provider: ProviderProps) => {
const groupNames =
provider.relationships?.provider_groups?.data?.map(
(group: { id: string }) =>
providerGroupDict[group.id] || "Unknown Group",
) || [];
return { ...provider, groupNames };
}) || [];
return (
<>
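Worth noting in the filter above is the `(PROVIDER_TYPES as readonly string[])` widening: if `PROVIDER_TYPES` is declared as a `const` tuple, its `.includes()` signature only accepts the tuple's own literal union, so testing an arbitrary string from the API response requires the cast. A small sketch with illustrative values (the real `PROVIDER_TYPES` is exported from `@/types`):

```typescript
// Illustrative values only; the real list is exported from @/types.
const PROVIDER_TYPES = ["aws", "azure", "gcp"] as const;

function isSupportedProvider(value: string): boolean {
  // Without the widening cast, .includes() on the readonly tuple type only
  // accepts "aws" | "azure" | "gcp" and rejects a plain string argument.
  return (PROVIDER_TYPES as readonly string[]).includes(value);
}

isSupportedProvider("unknown-cloud"); // => false with these stand-in values
```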
+6 -19
View File
@@ -1,21 +1,19 @@
import { Suspense } from "react";
import { getAllProviders } from "@/actions/providers";
import { getScans, getScansByState } from "@/actions/scans";
import { getScans } from "@/actions/scans";
import { auth } from "@/auth.config";
import { MutedFindingsConfigButton } from "@/components/providers";
import {
AutoRefresh,
NoProvidersAdded,
NoProvidersConnected,
ScansFilters,
} from "@/components/scans";
import { LaunchScanWorkflow } from "@/components/scans/launch-workflow";
import { SkeletonTableScans } from "@/components/scans/table";
import { ColumnGetScans } from "@/components/scans/table/scans";
import { ScansTableWithPolling } from "@/components/scans/table/scans";
import { ContentLayout } from "@/components/ui";
import { CustomBanner } from "@/components/ui/custom/custom-banner";
import { DataTable } from "@/components/ui/table";
import {
createProviderDetailsMapping,
extractProviderUIDs,
@@ -57,15 +55,6 @@ export default async function Scans({
const hasManageScansPermission = session?.user?.permissions?.manage_scans;
// Get scans data to check for executing scans
const scansData = await getScansByState();
const hasExecutingScan = scansData?.data?.some(
(scan: ScanProps) =>
scan.attributes.state === "executing" ||
scan.attributes.state === "available",
);
// Extract provider UIDs and create provider details mapping for filtering
const providerUIDs = providersData ? extractProviderUIDs(providersData) : [];
const providerDetails = providersData
@@ -82,7 +71,6 @@ export default async function Scans({
return (
<ContentLayout title="Scans" icon="lucide:timer">
<AutoRefresh hasExecutingScan={hasExecutingScan} />
<>
<>
{!hasManageScansPermission ? (
@@ -177,11 +165,10 @@ const SSRDataTableScans = async ({
}) || [];
return (
<DataTable
key={`scans-${Date.now()}`}
columns={ColumnGetScans}
data={expandedScansData || []}
metadata={meta}
<ScansTableWithPolling
initialData={expandedScansData}
initialMeta={meta}
searchParams={searchParams}
/>
);
};
@@ -13,6 +13,7 @@ import { Form } from "@/components/ui/form";
import { toast } from "@/components/ui/toast";
import { onDemandScanFormSchema } from "@/types";
import { SCAN_LAUNCHED_EVENT } from "../table/scans/scans-table-with-polling";
import { SelectScanProvider } from "./select-scan-provider";
type ProviderInfo = {
@@ -85,6 +86,8 @@ export const LaunchScanWorkflow = ({
});
// Reset form after successful submission
form.reset();
// Notify the scans table to refresh and pick up the new scan
window.dispatchEvent(new Event(SCAN_LAUNCHED_EVENT));
}
};
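The dispatch above pairs with the listener in `ScansTableWithPolling` (shown below): a plain DOM event on `window` lets the launch form trigger an immediate refetch without prop drilling or shared state. A minimal sketch of the handshake, with `refetch` standing in for the table's `handleRefresh` callback:

```typescript
// Minimal sketch of the window-event handshake; refetch is a placeholder
// for the table's handleRefresh callback.
export const SCAN_LAUNCHED_EVENT = "scan-launched";

// Producer (launch form): fire once a scan was created successfully.
function notifyScanLaunched(): void {
  window.dispatchEvent(new Event(SCAN_LAUNCHED_EVENT));
}

// Consumer (scans table): refetch immediately instead of waiting for the
// next polling tick; return a cleanup function for unmount.
function subscribeToScanLaunched(refetch: () => void): () => void {
  window.addEventListener(SCAN_LAUNCHED_EVENT, refetch);
  return () => window.removeEventListener(SCAN_LAUNCHED_EVENT, refetch);
}
```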
+1
View File
@@ -1,3 +1,4 @@
export * from "./column-get-scans";
export * from "./data-table-row-actions";
export * from "./data-table-row-details";
export * from "./scans-table-with-polling";
@@ -0,0 +1,126 @@
"use client";
import { useCallback, useEffect, useState } from "react";
import { getScans } from "@/actions/scans";
import { AutoRefresh } from "@/components/scans";
import { DataTable } from "@/components/ui/table";
import { MetaDataProps, ScanProps, SearchParamsProps } from "@/types";
import { ColumnGetScans } from "./column-get-scans";
export const SCAN_LAUNCHED_EVENT = "scan-launched";
interface ScansTableWithPollingProps {
initialData: ScanProps[];
initialMeta?: MetaDataProps;
searchParams: SearchParamsProps;
}
const EXECUTING_STATES = ["executing", "available"] as const;
function expandScansWithProviderInfo(
scans: ScanProps[],
included?: Array<{ type: string; id: string; attributes: any }>,
) {
return (
scans?.map((scan) => {
const providerId = scan.relationships?.provider?.data?.id;
if (!providerId) {
return { ...scan, providerInfo: undefined };
}
const providerData = included?.find(
(item) => item.type === "providers" && item.id === providerId,
);
if (!providerData) {
return { ...scan, providerInfo: undefined };
}
return {
...scan,
providerInfo: {
provider: providerData.attributes.provider,
uid: providerData.attributes.uid,
alias: providerData.attributes.alias,
},
};
}) || []
);
}
export function ScansTableWithPolling({
initialData,
initialMeta,
searchParams,
}: ScansTableWithPollingProps) {
const [scansData, setScansData] = useState<ScanProps[]>(initialData);
const [meta, setMeta] = useState<MetaDataProps | undefined>(initialMeta);
const hasExecutingScan = scansData.some((scan) =>
EXECUTING_STATES.includes(
scan.attributes.state as (typeof EXECUTING_STATES)[number],
),
);
const handleRefresh = useCallback(async () => {
const page = parseInt(searchParams.page?.toString() || "1", 10);
const pageSize = parseInt(searchParams.pageSize?.toString() || "10", 10);
const sort = searchParams.sort?.toString();
const filters = Object.fromEntries(
Object.entries(searchParams).filter(
([key]) => key.startsWith("filter[") && key !== "scanId",
),
);
const query = (filters["filter[search]"] as string) || "";
const result = await getScans({
query,
page,
sort,
filters,
pageSize,
include: "provider",
});
if (result?.data) {
const expanded = expandScansWithProviderInfo(
result.data,
result.included,
);
setScansData(expanded);
if (result && "meta" in result) {
setMeta(result.meta as MetaDataProps);
}
}
}, [searchParams]);
// Listen for scan launch events to trigger an immediate refresh
useEffect(() => {
const handler = () => {
handleRefresh();
};
window.addEventListener(SCAN_LAUNCHED_EVENT, handler);
return () => window.removeEventListener(SCAN_LAUNCHED_EVENT, handler);
}, [handleRefresh]);
return (
<>
<AutoRefresh
hasExecutingScan={hasExecutingScan}
onRefresh={handleRefresh}
/>
<DataTable
key={`scans-${scansData.length}-${meta?.pagination?.page}`}
columns={ColumnGetScans}
data={scansData}
metadata={meta}
/>
</>
);
}
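One design choice here connects back to the removal of `key={`scans-${Date.now()}`}` earlier in the diff: a timestamp key changes on every render, forcing React to tear down and remount the table on each poll tick, while a data-derived key keeps the table mounted across refreshes. A small sketch of the difference:

```typescript
// A timestamp key is different on every render, so React remounts the table
// on each poll tick and discards its internal state.
const unstableKey = `scans-${Date.now()}`;

// A data-derived key resets the table only when the rows or page change.
function stableScansKey(rowCount: number, page: number = 1): string {
  return `scans-${rowCount}-${page}`;
}

console.log(unstableKey, stableScansKey(10, 2)); // e.g. "scans-1739..." "scans-10-2"
```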
@@ -21,7 +21,9 @@ export const TableLink = ({ href, label, isDisabled }: TableLinkProps) => {
return (
<Button asChild variant="link" size="sm" className="text-xs">
<Link href={href}>{label}</Link>
<Link href={href} prefetch={false}>
{label}
</Link>
</Button>
);
};

Some files were not shown because too many files have changed in this diff.