Compare commits

...

25 Commits

Author SHA1 Message Date
Andoni A. 481a2defc9 poc: examples API token notebooks in DEV 2025-10-22 15:21:53 +02:00
Andoni A. 6716c4ae73 Merge branch 'master' into demo-api-key 2025-10-22 11:13:41 +02:00
César Arroba 18f3bc098c chore(github): trigger only if repository is prowler (#8974) 2025-10-22 09:27:33 +02:00
César Arroba 67b1983d85 chore(github): fix action (#8973) 2025-10-22 09:10:47 +02:00
Andoni A. 0585247890 poc: examples API token notebooks 2025-10-21 18:07:33 +02:00
César Arroba a3db23af7d chore(github): improve conventional commits action (#8969) 2025-10-21 17:57:29 +02:00
César Arroba 3eaa21f06f chore(github): improve backport label action (#8970) 2025-10-21 17:57:04 +02:00
Rubén De la Torre Vico 5d5c109067 chore(aws): enhance metadata for dlm service (#8860)
Co-authored-by: Daniel Barranquero <danielbo2001@gmail.com>
2025-10-21 17:40:19 +02:00
César Arroba c6cb4e4814 chore(github): improve backport action (#8968) 2025-10-21 17:14:40 +02:00
César Arroba ab06a09173 chore(api): improve pull request action (#8963) 2025-10-21 17:10:48 +02:00
Rubén De la Torre Vico 9c6c007f73 fix(mcp): add missing argument to health check (#8967) 2025-10-21 16:45:05 +02:00
Rubén De la Torre Vico 206f23b5a5 chore(aws): enhance metadata for dms service (#8861)
Co-authored-by: Daniel Barranquero <danielbo2001@gmail.com>
2025-10-21 16:31:18 +02:00
Andoni Alonso 5c9e9bc86a docs: fix security heading (#8965) 2025-10-21 16:13:55 +02:00
Rubén De la Torre Vico 34554d6123 feat(mcp): add support for production deployment with uvicorn (#8958) 2025-10-21 16:03:24 +02:00
Pepe Fagoaga 000cb93157 chore: remove security template as it's already there (#8964) 2025-10-21 19:34:42 +05:45
Adrián Jesús Peña Rodríguez 524209bdf2 feat(api): add provider_id__in filter for ScanSummary queries (#8951) 2025-10-21 15:24:09 +02:00
César Arroba c4a0da8204 chore(github): review and update issue templates (#8961) 2025-10-21 13:40:25 +02:00
César Arroba f0cba0321c chore(codeql): improve API CodeQL action and settings (#8962) 2025-10-21 13:40:07 +02:00
dependabot[bot] 79888c9312 chore(deps): bump playwright and @playwright/test in /ui (#8956)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-21 13:22:21 +02:00
Rubén De la Torre Vico a79910a694 chore(aws): enhance metadata for cloudtrail service (#8831)
Co-authored-by: HugoPBrito <hugopbrit@gmail.com>
2025-10-21 12:45:31 +02:00
César Arroba 4cadee7bb1 chore(github): update codeowners file (#8960) 2025-10-21 11:48:21 +02:00
Pedro Martín 756d436a2f feat(compliance): improve CCC catalogs (#8944) 2025-10-21 03:16:05 +02:00
Alejandro Bailo 5e85ef5835 feat(ui): new card components and derivates for overview (#8921)
Co-authored-by: Alan Buscaglia <gentlemanprogramming@gmail.com>
2025-10-20 16:49:09 +02:00
Prowler Bot 0fa9e2da6c chore(regions_update): Changes in regions for AWS services (#8946)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2025-10-20 09:20:29 -04:00
Andoni Alonso ce7510db28 docs: remove anchors from redirects (#8953) 2025-10-20 14:58:53 +02:00
73 changed files with 4631 additions and 7784 deletions
+27 -5
View File
@@ -1,6 +1,28 @@
# SDK
/* @prowler-cloud/sdk
/.github/ @prowler-cloud/sdk
prowler @prowler-cloud/sdk @prowler-cloud/detection-and-remediation
tests @prowler-cloud/sdk @prowler-cloud/detection-and-remediation
api @prowler-cloud/api
ui @prowler-cloud/ui
/prowler/ @prowler-cloud/sdk @prowler-cloud/detection-and-remediation
/tests/ @prowler-cloud/sdk @prowler-cloud/detection-and-remediation
/dashboard/ @prowler-cloud/sdk
/docs/ @prowler-cloud/sdk
/examples/ @prowler-cloud/sdk
/util/ @prowler-cloud/sdk
/contrib/ @prowler-cloud/sdk
/permissions/ @prowler-cloud/sdk
/codecov.yml @prowler-cloud/sdk @prowler-cloud/api
# API
/api/ @prowler-cloud/api
# UI
/ui/ @prowler-cloud/ui
# AI
/mcp_server/ @prowler-cloud/ai
# Platform
/.github/ @prowler-cloud/platform
/Makefile @prowler-cloud/platform
/kubernetes/ @prowler-cloud/platform
**/Dockerfile* @prowler-cloud/platform
**/docker-compose*.yml @prowler-cloud/platform
**/docker-compose*.yaml @prowler-cloud/platform
+44
View File
@@ -3,6 +3,41 @@ description: Create a report to help us improve
labels: ["bug", "status/needs-triage"]
body:
- type: checkboxes
id: search
attributes:
label: Issue search
options:
- label: I have searched the existing issues and this bug has not been reported yet
required: true
- type: dropdown
id: component
attributes:
label: Which component is affected?
multiple: true
options:
- Prowler CLI/SDK
- Prowler API
- Prowler UI
- Prowler Dashboard
- Prowler MCP Server
- Documentation
- Other
validations:
required: true
- type: dropdown
id: provider
attributes:
label: Cloud Provider (if applicable)
multiple: true
options:
- AWS
- Azure
- GCP
- Kubernetes
- GitHub
- Microsoft 365
- Not applicable
- type: textarea
id: reproduce
attributes:
@@ -78,6 +113,15 @@ body:
prowler --version
validations:
required: true
- type: input
id: python-version
attributes:
label: Python version
description: Which Python version are you using?
placeholder: |-
python --version
validations:
required: true
- type: input
id: pip-version
attributes:
+10
View File
@@ -1 +1,11 @@
blank_issues_enabled: false
contact_links:
- name: 📖 Documentation
url: https://docs.prowler.com
about: Check our comprehensive documentation for guides and tutorials
- name: 💬 GitHub Discussions
url: https://github.com/prowler-cloud/prowler/discussions
about: Ask questions and discuss with the community
- name: 🌟 Prowler Community
url: https://goto.prowler.com/slack
about: Join our community for support and updates
@@ -3,6 +3,42 @@ description: Suggest an idea for this project
labels: ["feature-request", "status/needs-triage"]
body:
- type: checkboxes
id: search
attributes:
label: Feature search
options:
- label: I have searched the existing issues and this feature has not been requested yet
required: true
- type: dropdown
id: component
attributes:
label: Which component would this feature affect?
multiple: true
options:
- Prowler CLI/SDK
- Prowler API
- Prowler UI
- Prowler Dashboard
- Prowler MCP Server
- Documentation
- New component/Integration
validations:
required: true
- type: dropdown
id: provider
attributes:
label: Related to specific cloud provider?
multiple: true
options:
- AWS
- Azure
- GCP
- Kubernetes
- GitHub
- Microsoft 365
- All providers
- Not provider-specific
- type: textarea
id: Problem
attributes:
@@ -19,6 +55,14 @@ body:
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
id: use-case
attributes:
label: Use case and benefits
description: Who would benefit from this feature and how?
placeholder: This would help security teams by...
validations:
required: true
- type: textarea
id: Alternatives
attributes:
@@ -0,0 +1,71 @@
name: 'Setup Python with Poetry'
description: 'Setup Python environment with Poetry and install dependencies'
author: 'Prowler'
inputs:
python-version:
description: 'Python version to use'
required: true
working-directory:
description: 'Working directory for Poetry'
required: false
default: '.'
poetry-version:
description: 'Poetry version to install'
required: false
default: '2.1.1'
install-dependencies:
description: 'Install Python dependencies with Poetry'
required: false
default: 'true'
runs:
using: 'composite'
steps:
- name: Replace @master with current branch in pyproject.toml
if: github.event_name == 'pull_request' && github.base_ref == 'master'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
BRANCH_NAME="${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}"
echo "Using branch: $BRANCH_NAME"
sed -i "s|@master|@$BRANCH_NAME|g" pyproject.toml
- name: Install poetry
shell: bash
run: |
python -m pip install --upgrade pip
pipx install poetry==${{ inputs.poetry-version }}
- name: Update SDK resolved_reference to latest commit
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
LATEST_COMMIT=$(curl -s "https://api.github.com/repos/prowler-cloud/prowler/commits/master" | jq -r '.sha')
echo "Latest commit hash: $LATEST_COMMIT"
sed -i '/url = "https:\/\/github\.com\/prowler-cloud\/prowler\.git"/,/resolved_reference = / {
s/resolved_reference = "[a-f0-9]\{40\}"/resolved_reference = "'"$LATEST_COMMIT"'"/
}' poetry.lock
echo "Updated resolved_reference:"
grep -A2 -B2 "resolved_reference" poetry.lock
- name: Update poetry.lock
shell: bash
working-directory: ${{ inputs.working-directory }}
run: poetry lock
- name: Set up Python ${{ inputs.python-version }}
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ inputs.python-version }}
cache: 'poetry'
cache-dependency-path: ${{ inputs.working-directory }}/poetry.lock
- name: Install Python dependencies
if: inputs.install-dependencies == 'true'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
poetry install --no-root
poetry run pip list
+152
View File
@@ -0,0 +1,152 @@
name: 'Container Security Scan with Trivy'
description: 'Scans container images for vulnerabilities using Trivy and reports results'
author: 'Prowler'
inputs:
image-name:
description: 'Container image name to scan'
required: true
image-tag:
description: 'Container image tag to scan'
required: true
default: ${{ github.sha }}
severity:
description: 'Severities to scan for (comma-separated)'
required: false
default: 'CRITICAL,HIGH,MEDIUM,LOW'
fail-on-critical:
description: 'Fail the build if critical vulnerabilities are found'
required: false
default: 'false'
upload-sarif:
description: 'Upload results to GitHub Security tab'
required: false
default: 'true'
create-pr-comment:
description: 'Create a comment on the PR with scan results'
required: false
default: 'true'
artifact-retention-days:
description: 'Days to retain the Trivy report artifact'
required: false
default: '2'
outputs:
critical-count:
description: 'Number of critical vulnerabilities found'
value: ${{ steps.security-check.outputs.critical }}
high-count:
description: 'Number of high vulnerabilities found'
value: ${{ steps.security-check.outputs.high }}
total-count:
description: 'Total number of vulnerabilities found'
value: ${{ steps.security-check.outputs.total }}
runs:
using: 'composite'
steps:
- name: Run Trivy vulnerability scan (SARIF)
if: inputs.upload-sarif == 'true'
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
with:
image-ref: ${{ inputs.image-name }}:${{ inputs.image-tag }}
format: 'sarif'
output: 'trivy-results.sarif'
severity: 'CRITICAL,HIGH'
exit-code: '0'
- name: Upload Trivy results to GitHub Security tab
if: inputs.upload-sarif == 'true'
uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5
with:
sarif_file: 'trivy-results.sarif'
category: 'trivy-container'
- name: Run Trivy vulnerability scan (JSON)
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
with:
image-ref: ${{ inputs.image-name }}:${{ inputs.image-tag }}
format: 'json'
output: 'trivy-report.json'
severity: ${{ inputs.severity }}
exit-code: '0'
- name: Upload Trivy report artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: always()
with:
name: trivy-scan-report-${{ inputs.image-name }}
path: trivy-report.json
retention-days: ${{ inputs.artifact-retention-days }}
- name: Generate security summary
id: security-check
shell: bash
run: |
CRITICAL=$(jq '[.Results[]?.Vulnerabilities[]? | select(.Severity=="CRITICAL")] | length' trivy-report.json)
HIGH=$(jq '[.Results[]?.Vulnerabilities[]? | select(.Severity=="HIGH")] | length' trivy-report.json)
TOTAL=$(jq '[.Results[]?.Vulnerabilities[]?] | length' trivy-report.json)
echo "critical=$CRITICAL" >> $GITHUB_OUTPUT
echo "high=$HIGH" >> $GITHUB_OUTPUT
echo "total=$TOTAL" >> $GITHUB_OUTPUT
echo "### 🔒 Container Security Scan" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Image:** \`${{ inputs.image-name }}:${{ inputs.image-tag }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- 🔴 Critical: $CRITICAL" >> $GITHUB_STEP_SUMMARY
echo "- 🟠 High: $HIGH" >> $GITHUB_STEP_SUMMARY
echo "- **Total**: $TOTAL" >> $GITHUB_STEP_SUMMARY
- name: Comment scan results on PR
if: inputs.create-pr-comment == 'true' && github.event_name == 'pull_request'
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
env:
IMAGE_NAME: ${{ inputs.image-name }}
GITHUB_SHA: ${{ inputs.image-tag }}
SEVERITY: ${{ inputs.severity }}
with:
script: |
const comment = require('./.github/scripts/trivy-pr-comment.js');
// Unique identifier to find our comment
const marker = '<!-- trivy-scan-comment:${{ inputs.image-name }} -->';
const body = marker + '\n' + comment;
// Find existing comment
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existingComment = comments.find(c => c.body?.includes(marker));
if (existingComment) {
// Update existing comment
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existingComment.id,
body: body
});
console.log('✅ Updated existing Trivy scan comment');
} else {
// Create new comment
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: body
});
console.log('✅ Created new Trivy scan comment');
}
- name: Check for critical vulnerabilities
if: inputs.fail-on-critical == 'true' && steps.security-check.outputs.critical != '0'
shell: bash
run: |
echo "::error::Found ${{ steps.security-check.outputs.critical }} critical vulnerabilities"
echo "::warning::Please update packages or use a different base image"
exit 1
+11 -2
View File
@@ -1,3 +1,12 @@
name: "API - CodeQL Config"
name: 'API: CodeQL Config'
paths:
- "api/"
- 'api/'
paths-ignore:
- 'api/tests/**'
- 'api/**/__pycache__/**'
- 'api/**/migrations/**'
- 'api/**/*.md'
queries:
- uses: security-and-quality
+102
View File
@@ -0,0 +1,102 @@
const fs = require('fs');
// Configuration from environment variables
const REPORT_FILE = process.env.TRIVY_REPORT_FILE || 'trivy-report.json';
const IMAGE_NAME = process.env.IMAGE_NAME || 'container-image';
const GITHUB_SHA = process.env.GITHUB_SHA || 'unknown';
const GITHUB_REPOSITORY = process.env.GITHUB_REPOSITORY || '';
const GITHUB_RUN_ID = process.env.GITHUB_RUN_ID || '';
const SEVERITY = process.env.SEVERITY || 'CRITICAL,HIGH,MEDIUM,LOW';
// Parse severities to scan
const scannedSeverities = SEVERITY.split(',').map(s => s.trim());
// Read and parse the Trivy report
const report = JSON.parse(fs.readFileSync(REPORT_FILE, 'utf-8'));
let vulnCount = 0;
let vulnsByType = { CRITICAL: 0, HIGH: 0, MEDIUM: 0, LOW: 0 };
let affectedPackages = new Set();
if (report.Results && Array.isArray(report.Results)) {
for (const result of report.Results) {
if (result.Vulnerabilities && Array.isArray(result.Vulnerabilities)) {
for (const vuln of result.Vulnerabilities) {
vulnCount++;
if (vulnsByType[vuln.Severity] !== undefined) {
vulnsByType[vuln.Severity]++;
}
if (vuln.PkgName) {
affectedPackages.add(vuln.PkgName);
}
}
}
}
}
const shortSha = GITHUB_SHA.substring(0, 7);
const timestamp = new Date().toISOString().replace('T', ' ').substring(0, 19) + ' UTC';
// Severity icons and labels
const severityConfig = {
CRITICAL: { icon: '🔴', label: 'Critical' },
HIGH: { icon: '🟠', label: 'High' },
MEDIUM: { icon: '🟡', label: 'Medium' },
LOW: { icon: '🔵', label: 'Low' }
};
let comment = '## 🔒 Container Security Scan\n\n';
comment += `**Image:** \`${IMAGE_NAME}:${shortSha}\`\n`;
comment += `**Last scan:** ${timestamp}\n\n`;
if (vulnCount === 0) {
comment += '### ✅ No Vulnerabilities Detected\n\n';
comment += 'The container image passed all security checks. No known CVEs were found.\n';
} else {
comment += '### 📊 Vulnerability Summary\n\n';
comment += '| Severity | Count |\n';
comment += '|----------|-------|\n';
// Only show severities that were scanned
for (const severity of scannedSeverities) {
const config = severityConfig[severity];
const count = vulnsByType[severity] || 0;
const isBold = (severity === 'CRITICAL' || severity === 'HIGH') && count > 0;
const countDisplay = isBold ? `**${count}**` : count;
comment += `| ${config.icon} ${config.label} | ${countDisplay} |\n`;
}
comment += `| **Total** | **${vulnCount}** |\n\n`;
if (affectedPackages.size > 0) {
comment += `**${affectedPackages.size}** package(s) affected\n\n`;
}
if (vulnsByType.CRITICAL > 0) {
comment += '### ⚠️ Action Required\n\n';
comment += '**Critical severity vulnerabilities detected.** These should be addressed before merging:\n';
comment += '- Review the detailed scan results\n';
comment += '- Update affected packages to patched versions\n';
comment += '- Consider using a different base image if updates are unavailable\n\n';
} else if (vulnsByType.HIGH > 0) {
comment += '### ⚠️ Attention Needed\n\n';
comment += '**High severity vulnerabilities found.** Please review and plan remediation:\n';
comment += '- Assess the risk and exploitability\n';
comment += '- Prioritize updates in the next maintenance cycle\n\n';
} else {
comment += '### ️ Review Recommended\n\n';
comment += 'Medium/Low severity vulnerabilities found. Consider addressing during regular maintenance.\n\n';
}
}
comment += '---\n';
comment += '📋 **Resources:**\n';
if (GITHUB_REPOSITORY && GITHUB_RUN_ID) {
comment += `- [Download full report](https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}) (see artifacts)\n`;
}
comment += '- [View in Security tab](https://github.com/' + (GITHUB_REPOSITORY || 'repository') + '/security/code-scanning)\n';
comment += '- Scanned with [Trivy](https://github.com/aquasecurity/trivy)\n';
module.exports = comment;
+30 -33
View File
@@ -1,36 +1,34 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: API - CodeQL
name: 'API: CodeQL'
on:
push:
branches:
- "master"
- "v5.*"
- 'master'
- 'v5.*'
paths:
- "api/**"
- 'api/**'
- '.github/workflows/api-codeql.yml'
- '.github/codeql/api-codeql-config.yml'
pull_request:
branches:
- "master"
- "v5.*"
- 'master'
- 'v5.*'
paths:
- "api/**"
- 'api/**'
- '.github/workflows/api-codeql.yml'
- '.github/codeql/api-codeql-config.yml'
schedule:
- cron: '00 12 * * *'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
analyze:
name: Analyze
name: CodeQL Security Analysis
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
actions: read
contents: read
@@ -39,21 +37,20 @@ jobs:
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
language:
- 'python'
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5
with:
languages: ${{ matrix.language }}
config-file: ./.github/codeql/api-codeql-config.yml
- name: Initialize CodeQL
uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5
with:
languages: ${{ matrix.language }}
config-file: ./.github/codeql/api-codeql-config.yml
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5
with:
category: "/language:${{matrix.language}}"
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5
with:
category: '/language:${{ matrix.language }}'
+148 -153
View File
@@ -1,20 +1,30 @@
name: API - Pull Request
name: 'API: Pull Request'
on:
push:
branches:
- "master"
- "v5.*"
- 'master'
- 'v5.*'
paths:
- ".github/workflows/api-pull-request.yml"
- "api/**"
- '.github/workflows/api-pull-request.yml'
- 'api/**'
- '!api/docs/**'
- '!api/README.md'
- '!api/CHANGELOG.md'
pull_request:
branches:
- "master"
- "v5.*"
- 'master'
- 'v5.*'
paths:
- ".github/workflows/api-pull-request.yml"
- "api/**"
- '.github/workflows/api-pull-request.yml'
- 'api/**'
- '!api/docs/**'
- '!api/README.md'
- '!api/CHANGELOG.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
POSTGRES_HOST: localhost
@@ -29,21 +39,94 @@ env:
VALKEY_DB: 0
API_WORKING_DIR: ./api
IMAGE_NAME: prowler-api
IGNORE_FILES: |
api/docs/**
api/README.md
api/CHANGELOG.md
jobs:
test:
code-quality:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: read
strategy:
matrix:
python-version: ["3.12"]
python-version:
- '3.12'
defaults:
run:
working-directory: ./api
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup Python with Poetry
uses: ./.github/actions/setup-python-poetry
with:
python-version: ${{ matrix.python-version }}
working-directory: ./api
- name: Poetry check
run: poetry check --lock
- name: Ruff lint
run: poetry run ruff check . --exclude contrib
- name: Ruff format
run: poetry run ruff format --check . --exclude contrib
- name: Pylint
run: poetry run pylint --disable=W,C,R,E -j 0 -rn -sn src/
security-scans:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
strategy:
matrix:
python-version:
- '3.12'
defaults:
run:
working-directory: ./api
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Setup Python with Poetry
uses: ./.github/actions/setup-python-poetry
with:
python-version: ${{ matrix.python-version }}
working-directory: ./api
- name: Bandit
run: poetry run bandit -q -lll -x '*_test.py,./contrib/' -r .
- name: Safety
# 76352, 76353, 77323 come from SDK, but they cannot upgrade it yet. It does not affect API
# TODO: Botocore needs urllib3 1.X so we need to ignore these vulnerabilities 77744,77745. Remove this once we upgrade to urllib3 2.X
run: poetry run safety check --ignore 70612,66963,74429,76352,76353,77323,77744,77745
- name: Vulture
run: poetry run vulture --exclude "contrib,tests,conftest.py" --min-confidence 100 .
tests:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: read
strategy:
matrix:
python-version:
- '3.12'
defaults:
run:
working-directory: ./api
# Service containers to run with `test`
services:
# Label used to access the service container
postgres:
image: postgres
env:
@@ -52,7 +135,6 @@ jobs:
POSTGRES_USER: ${{ env.POSTGRES_USER }}
POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }}
POSTGRES_DB: ${{ env.POSTGRES_DB }}
# Set health checks to wait until postgres has started
ports:
- 5432:5432
options: >-
@@ -66,7 +148,6 @@ jobs:
VALKEY_HOST: ${{ env.VALKEY_HOST }}
VALKEY_PORT: ${{ env.VALKEY_PORT }}
VALKEY_DB: ${{ env.VALKEY_DB }}
# Set health checks to wait until postgres has started
ports:
- 6379:6379
options: >-
@@ -76,158 +157,72 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Test if changes are in not ignored paths
id: are-non-ignored-files-changed
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files: |
api/**
.github/workflows/api-pull-request.yml
files_ignore: ${{ env.IGNORE_FILES }}
- name: Replace @master with current branch in pyproject.toml - Only for pull requests to `master`
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' && github.event_name == 'pull_request' && github.base_ref == 'master'
run: |
BRANCH_NAME="${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}"
echo "Using branch: $BRANCH_NAME"
sed -i "s|@master|@$BRANCH_NAME|g" pyproject.toml
- name: Install poetry
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
python -m pip install --upgrade pip
pipx install poetry==2.1.1
- name: Update SDK's poetry.lock resolved_reference to latest commit - Only for push events to `master`
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/master'
run: |
# Get the latest commit hash from the prowler-cloud/prowler repository
LATEST_COMMIT=$(curl -s "https://api.github.com/repos/prowler-cloud/prowler/commits/master" | jq -r '.sha')
echo "Latest commit hash: $LATEST_COMMIT"
# Update the resolved_reference specifically for prowler-cloud/prowler repository
sed -i '/url = "https:\/\/github\.com\/prowler-cloud\/prowler\.git"/,/resolved_reference = / {
s/resolved_reference = "[a-f0-9]\{40\}"/resolved_reference = "'"$LATEST_COMMIT"'"/
}' poetry.lock
# Verify the change was made
echo "Updated resolved_reference:"
grep -A2 -B2 "resolved_reference" poetry.lock
- name: Update poetry.lock
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry lock
- name: Set up Python ${{ matrix.python-version }}
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
- name: Setup Python with Poetry
uses: ./.github/actions/setup-python-poetry
with:
python-version: ${{ matrix.python-version }}
cache: "poetry"
working-directory: ./api
- name: Install dependencies
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry install --no-root
poetry run pip list
VERSION=$(curl --silent "https://api.github.com/repos/hadolint/hadolint/releases/latest" | \
grep '"tag_name":' | \
sed -E 's/.*"v([^"]+)".*/\1/' \
) && curl -L -o /tmp/hadolint "https://github.com/hadolint/hadolint/releases/download/v${VERSION}/hadolint-Linux-x86_64" \
&& chmod +x /tmp/hadolint
- name: Poetry check
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry check --lock
- name: Lint with ruff
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run ruff check . --exclude contrib
- name: Check Format with ruff
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run ruff format --check . --exclude contrib
- name: Lint with pylint
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run pylint --disable=W,C,R,E -j 0 -rn -sn src/
- name: Bandit
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run bandit -q -lll -x '*_test.py,./contrib/' -r .
- name: Safety
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
# 76352, 76353, 77323 come from SDK, but they cannot upgrade it yet. It does not affect API
# TODO: Botocore needs urllib3 1.X so we need to ignore these vulnerabilities 77744,77745. Remove this once we upgrade to urllib3 2.X
run: |
poetry run safety check --ignore 70612,66963,74429,76352,76353,77323,77744,77745
- name: Vulture
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run vulture --exclude "contrib,tests,conftest.py" --min-confidence 100 .
- name: Hadolint
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
/tmp/hadolint Dockerfile --ignore=DL3013
- name: Test with pytest
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run pytest --cov=./src/backend --cov-report=xml src/backend
- name: Run tests with pytest
run: poetry run pytest --cov=./src/backend --cov-report=xml src/backend
- name: Upload coverage reports to Codecov
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: api
test-container-build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Test if changes are in not ignored paths
id: are-non-ignored-files-changed
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
dockerfile-lint:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Lint Dockerfile with Hadolint
uses: hadolint/hadolint-action@2332a7b74a6de0dda2e2221d575162eba76ba5e5 # v3.3.0
with:
files: api/**
files_ignore: ${{ env.IGNORE_FILES }}
dockerfile: api/Dockerfile
ignore: DL3013
container-build-and-scan:
if: github.repository == 'prowler-cloud/prowler'
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
contents: read
security-events: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Docker Buildx
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Build Container
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
- name: Build container
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
with:
context: ${{ env.API_WORKING_DIR }}
push: false
tags: ${{ env.IMAGE_NAME }}:latest
outputs: type=docker
load: true
tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Scan container with Trivy
uses: ./.github/actions/trivy-scan
with:
image-name: ${{ env.IMAGE_NAME }}
image-tag: ${{ github.sha }}
fail-on-critical: 'false'
severity: 'CRITICAL'
+22 -15
View File
@@ -1,28 +1,35 @@
name: Prowler - Automatic Backport
name: 'Tools: Backport'
on:
pull_request_target:
branches: ['master']
types: ['labeled', 'closed']
branches:
- 'master'
types:
- 'labeled'
- 'closed'
paths:
- '.github/workflows/backport.yml'
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: false
env:
# The prefix of the label that triggers the backport must not contain the branch name
# so, for example, if the branch is 'master', the label should be 'backport-to-<branch>'
BACKPORT_LABEL_PREFIX: backport-to-
BACKPORT_LABEL_IGNORE: was-backported
jobs:
backport:
name: Backport PR
if: github.event.pull_request.merged == true && !(contains(github.event.pull_request.labels.*.name, 'backport')) && !(contains(github.event.pull_request.labels.*.name, 'was-backported'))
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
id-token: write
pull-requests: write
contents: write
pull-requests: write
steps:
- name: Check labels
id: preview_label_check
id: label_check
uses: agilepathway/label-checker@c3d16ad512e7cea5961df85ff2486bb774caf3c5 # v1.6.65
with:
allow_failure: true
@@ -31,17 +38,17 @@ jobs:
none_of: ${{ env.BACKPORT_LABEL_IGNORE }}
repo_token: ${{ secrets.GITHUB_TOKEN }}
- name: Backport Action
if: steps.preview_label_check.outputs.label_check == 'success'
- name: Backport PR
if: steps.label_check.outputs.label_check == 'success'
uses: sorenlouv/backport-github-action@ad888e978060bc1b2798690dd9d03c4036560947 # v9.5.1
with:
github_token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
auto_backport_label_prefix: ${{ env.BACKPORT_LABEL_PREFIX }}
- name: Info log
if: ${{ success() && steps.preview_label_check.outputs.label_check == 'success' }}
- name: Display backport info log
if: success() && steps.label_check.outputs.label_check == 'success'
run: cat ~/.backport/backport.info.log
- name: Debug log
if: ${{ failure() && steps.preview_label_check.outputs.label_check == 'success' }}
- name: Display backport debug log
if: failure() && steps.label_check.outputs.label_check == 'success'
run: cat ~/.backport/backport.debug.log
+20 -13
View File
@@ -1,24 +1,31 @@
name: Prowler - Conventional Commit
name: 'Tools: Conventional Commit'
on:
pull_request:
types:
- "opened"
- "edited"
- "synchronize"
branches:
- "master"
- "v3"
- "v4.*"
- "v5.*"
- 'master'
- 'v3'
- 'v4.*'
- 'v5.*'
types:
- 'opened'
- 'edited'
- 'synchronize'
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
conventional-commit-check:
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
pull-requests: read
steps:
- name: conventional-commit-check
id: conventional-commit-check
- name: Check PR title format
uses: agenthunt/conventional-commit-checker-action@9e552d650d0e205553ec7792d447929fc78e012b # v2.0.0
with:
pr-title-regex: '^(feat|fix|docs|style|refactor|perf|test|chore|build|ci|revert)(\([^)]+\))?!?: .+'
pr-title-regex: '^(feat|fix|docs|style|refactor|perf|test|chore|build|ci|revert)(\([^)]+\))?!?: .+'
+49 -46
View File
@@ -1,67 +1,70 @@
name: Prowler - Create Backport Label
name: 'Tools: Backport Label'
on:
release:
types: [published]
types:
- 'published'
concurrency:
group: ${{ github.workflow }}-${{ github.event.release.tag_name }}
cancel-in-progress: false
env:
BACKPORT_LABEL_PREFIX: backport-to-
BACKPORT_LABEL_COLOR: B60205
jobs:
create_label:
create-label:
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: write
contents: read
issues: write
steps:
- name: Create backport label
- name: Create backport label for minor releases
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
RELEASE_TAG: ${{ github.event.release.tag_name }}
OWNER_REPO: ${{ github.repository }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
VERSION_ONLY=${RELEASE_TAG#v} # Remove 'v' prefix if present (e.g., v3.2.0 -> 3.2.0)
RELEASE_TAG="${{ github.event.release.tag_name }}"
if [ -z "$RELEASE_TAG" ]; then
echo "Error: No release tag provided"
exit 1
fi
echo "Processing release tag: $RELEASE_TAG"
# Remove 'v' prefix if present (e.g., v3.2.0 -> 3.2.0)
VERSION_ONLY="${RELEASE_TAG#v}"
# Check if it's a minor version (X.Y.0)
if [[ "$VERSION_ONLY" =~ ^[0-9]+\.[0-9]+\.0$ ]]; then
echo "Release ${RELEASE_TAG} (version ${VERSION_ONLY}) is a minor version. Proceeding to create backport label."
if [[ "$VERSION_ONLY" =~ ^([0-9]+)\.([0-9]+)\.0$ ]]; then
echo "Release $RELEASE_TAG (version $VERSION_ONLY) is a minor version. Proceeding to create backport label."
TWO_DIGIT_VERSION=${VERSION_ONLY%.0} # Extract X.Y from X.Y.0 (e.g., 5.6 from 5.6.0)
# Extract X.Y from X.Y.0 (e.g., 5.6 from 5.6.0)
MAJOR="${BASH_REMATCH[1]}"
MINOR="${BASH_REMATCH[2]}"
TWO_DIGIT_VERSION="${MAJOR}.${MINOR}"
FINAL_LABEL_NAME="backport-to-v${TWO_DIGIT_VERSION}"
FINAL_DESCRIPTION="Backport PR to the v${TWO_DIGIT_VERSION} branch"
LABEL_NAME="${BACKPORT_LABEL_PREFIX}v${TWO_DIGIT_VERSION}"
LABEL_DESC="Backport PR to the v${TWO_DIGIT_VERSION} branch"
LABEL_COLOR="$BACKPORT_LABEL_COLOR"
echo "Effective label name will be: ${FINAL_LABEL_NAME}"
echo "Effective description will be: ${FINAL_DESCRIPTION}"
echo "Label name: $LABEL_NAME"
echo "Label description: $LABEL_DESC"
# Check if the label already exists
STATUS_CODE=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token ${GITHUB_TOKEN}" "https://api.github.com/repos/${OWNER_REPO}/labels/${FINAL_LABEL_NAME}")
if [ "${STATUS_CODE}" -eq 200 ]; then
echo "Label '${FINAL_LABEL_NAME}' already exists."
elif [ "${STATUS_CODE}" -eq 404 ]; then
echo "Label '${FINAL_LABEL_NAME}' does not exist. Creating it..."
# Prepare JSON data payload
JSON_DATA=$(printf '{"name":"%s","description":"%s","color":"B60205"}' "${FINAL_LABEL_NAME}" "${FINAL_DESCRIPTION}")
CREATE_STATUS_CODE=$(curl -s -o /tmp/curl_create_response.json -w "%{http_code}" -X POST \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token ${GITHUB_TOKEN}" \
--data "${JSON_DATA}" \
"https://api.github.com/repos/${OWNER_REPO}/labels")
CREATE_RESPONSE_BODY=$(cat /tmp/curl_create_response.json)
rm -f /tmp/curl_create_response.json
if [ "$CREATE_STATUS_CODE" -eq 201 ]; then
echo "Label '${FINAL_LABEL_NAME}' created successfully."
else
echo "Error creating label '${FINAL_LABEL_NAME}'. Status: $CREATE_STATUS_CODE"
echo "Response: $CREATE_RESPONSE_BODY"
exit 1
fi
# Check if label already exists
if gh label list --repo ${{ github.repository }} --limit 1000 | grep -q "^${LABEL_NAME}[[:space:]]"; then
echo "Label '$LABEL_NAME' already exists."
else
echo "Error checking for label '${FINAL_LABEL_NAME}'. HTTP Status: ${STATUS_CODE}"
exit 1
echo "Label '$LABEL_NAME' does not exist. Creating it..."
gh label create "$LABEL_NAME" \
--description "$LABEL_DESC" \
--color "$LABEL_COLOR" \
--repo ${{ github.repository }}
echo "Label '$LABEL_NAME' created successfully."
fi
else
echo "Release ${RELEASE_TAG} (version ${VERSION_ONLY}) is not a minor version. Skipping backport label creation."
exit 0
echo "Release $RELEASE_TAG (version $VERSION_ONLY) is not a minor version. Skipping backport label creation."
fi
+1 -1
View File
@@ -1,4 +1,4 @@
name: Prowler - Find secrets
name: 'Tools: TruffleHog'
on: pull_request
+1
View File
@@ -14,6 +14,7 @@ All notable changes to the **Prowler API** are documented in this file.
- Support for `passed_findings` and `total_findings` fields in compliance requirement overview for accurate Prowler ThreatScore calculation [(#8582)](https://github.com/prowler-cloud/prowler/pull/8582)
- Database read replica support [(#8869)](https://github.com/prowler-cloud/prowler/pull/8869)
- Support Common Cloud Controls for AWS, Azure and GCP [(#8000)](https://github.com/prowler-cloud/prowler/pull/8000)
- Add `provider_id__in` filter support to findings and findings severity overview endpoints [(#8951)](https://github.com/prowler-cloud/prowler/pull/8951)
### Changed
- Now the MANAGE_ACCOUNT permission is required to modify or read user permissions instead of MANAGE_USERS [(#8281)](https://github.com/prowler-cloud/prowler/pull/8281)
+1
View File
@@ -765,6 +765,7 @@ class ComplianceOverviewFilter(FilterSet):
class ScanSummaryFilter(FilterSet):
inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
provider_id = UUIDFilter(field_name="scan__provider__id", lookup_expr="exact")
provider_id__in = UUIDInFilter(field_name="scan__provider__id", lookup_expr="in")
provider_type = ChoiceFilter(
field_name="scan__provider__provider", choices=Provider.ProviderChoices.choices
)
+30
View File
@@ -3611,6 +3611,16 @@ paths:
schema:
type: string
format: uuid
- in: query
name: filter[provider_id__in]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[provider_type]
schema:
@@ -3778,6 +3788,16 @@ paths:
schema:
type: string
format: uuid
- in: query
name: filter[provider_id__in]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[provider_type]
schema:
@@ -3980,6 +4000,16 @@ paths:
schema:
type: string
format: uuid
- in: query
name: filter[provider_id__in]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[provider_type]
schema:
+166
View File
@@ -46,6 +46,7 @@ from api.models import (
SAMLConfiguration,
SAMLToken,
Scan,
ScanSummary,
StateChoices,
Task,
TenantAPIKey,
@@ -5766,6 +5767,171 @@ class TestOverviewViewSet:
assert service1_data["attributes"]["muted"] == 1
assert service2_data["attributes"]["muted"] == 0
def test_overview_findings_provider_id_in_filter(
self, authenticated_client, tenants_fixture, providers_fixture
):
tenant = tenants_fixture[0]
provider1, provider2, *_ = providers_fixture
scan1 = Scan.objects.create(
name="scan-one",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
)
scan2 = Scan.objects.create(
name="scan-two",
provider=provider2,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
)
ScanSummary.objects.create(
tenant=tenant,
scan=scan1,
check_id="check-provider-one",
service="service-a",
severity="high",
region="region-a",
_pass=5,
fail=1,
muted=2,
total=8,
new=5,
changed=2,
unchanged=1,
fail_new=1,
fail_changed=0,
pass_new=3,
pass_changed=2,
muted_new=1,
muted_changed=1,
)
ScanSummary.objects.create(
tenant=tenant,
scan=scan2,
check_id="check-provider-two",
service="service-b",
severity="medium",
region="region-b",
_pass=2,
fail=3,
muted=1,
total=6,
new=3,
changed=2,
unchanged=1,
fail_new=2,
fail_changed=1,
pass_new=1,
pass_changed=1,
muted_new=1,
muted_changed=0,
)
single_response = authenticated_client.get(
reverse("overview-findings"),
{"filter[provider_id__in]": str(provider1.id)},
)
assert single_response.status_code == status.HTTP_200_OK
single_attributes = single_response.json()["data"]["attributes"]
assert single_attributes["pass"] == 5
assert single_attributes["fail"] == 1
assert single_attributes["muted"] == 2
assert single_attributes["total"] == 8
combined_response = authenticated_client.get(
reverse("overview-findings"),
{"filter[provider_id__in]": f"{provider1.id},{provider2.id}"},
)
assert combined_response.status_code == status.HTTP_200_OK
combined_attributes = combined_response.json()["data"]["attributes"]
assert combined_attributes["pass"] == 7
assert combined_attributes["fail"] == 4
assert combined_attributes["muted"] == 3
assert combined_attributes["total"] == 14
def test_overview_findings_severity_provider_id_in_filter(
self, authenticated_client, tenants_fixture, providers_fixture
):
tenant = tenants_fixture[0]
provider1, provider2, *_ = providers_fixture
scan1 = Scan.objects.create(
name="severity-scan-one",
provider=provider1,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
)
scan2 = Scan.objects.create(
name="severity-scan-two",
provider=provider2,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
)
ScanSummary.objects.create(
tenant=tenant,
scan=scan1,
check_id="severity-check-one",
service="service-a",
severity="high",
region="region-a",
_pass=4,
fail=4,
muted=0,
total=8,
)
ScanSummary.objects.create(
tenant=tenant,
scan=scan1,
check_id="severity-check-two",
service="service-a",
severity="medium",
region="region-b",
_pass=2,
fail=2,
muted=0,
total=4,
)
ScanSummary.objects.create(
tenant=tenant,
scan=scan2,
check_id="severity-check-three",
service="service-b",
severity="critical",
region="region-c",
_pass=1,
fail=2,
muted=0,
total=3,
)
single_response = authenticated_client.get(
reverse("overview-findings_severity"),
{"filter[provider_id__in]": str(provider1.id)},
)
assert single_response.status_code == status.HTTP_200_OK
single_attributes = single_response.json()["data"]["attributes"]
assert single_attributes["high"] == 8
assert single_attributes["medium"] == 4
assert single_attributes["critical"] == 0
combined_response = authenticated_client.get(
reverse("overview-findings_severity"),
{"filter[provider_id__in]": f"{provider1.id},{provider2.id}"},
)
assert combined_response.status_code == status.HTTP_200_OK
combined_attributes = combined_response.json()["data"]["attributes"]
assert combined_attributes["high"] == 8
assert combined_attributes["medium"] == 4
assert combined_attributes["critical"] == 3
@pytest.mark.django_db
class TestScheduleViewSet:
-8
View File
@@ -404,14 +404,6 @@
"source": "/projects/prowler-open-source/en/latest/tutorials/gcp/getting-started-gcp",
"destination": "/user-guide/providers/gcp/getting-started-gcp"
},
{
"source": "/projects/prowler-open-source/en/latest/tutorials/prowler-app",
"destination": "/user-guide/tutorials/prowler-app#step-4-4%3A-kubernetes-credentials%3A"
},
{
"source": "/projects/prowler-open-source/en/latest/tutorials/prowler-app/#step-3-add-a-provider",
"destination": "/user-guide/tutorials/prowler-app#step-3-add-a-provider"
},
{
"source": "/projects/prowler-open-source/en/latest/tutorials/microsoft365/getting-started-m365",
"destination": "/user-guide/providers/microsoft365/getting-started-m365"
@@ -182,19 +182,19 @@ Configure the server using environment variables:
|----------|-------------|----------|---------|
| `PROWLER_APP_API_KEY` | Prowler API key | Only for STDIO mode | - |
| `PROWLER_API_BASE_URL` | Custom Prowler API endpoint | No | `https://api.prowler.com` |
| `PROWLER_MCP_MODE` | Default transport mode (overwritten by `--transport` argument) | No | `stdio` |
| `PROWLER_MCP_TRANSPORT_MODE` | Default transport mode (overwritten by `--transport` argument) | No | `stdio` |
<CodeGroup>
```bash macOS/Linux
export PROWLER_APP_API_KEY="pk_your_api_key_here"
export PROWLER_API_BASE_URL="https://api.prowler.com"
export PROWLER_MCP_MODE="http"
export PROWLER_MCP_TRANSPORT_MODE="http"
```
```bash Windows PowerShell
$env:PROWLER_APP_API_KEY="pk_your_api_key_here"
$env:PROWLER_API_BASE_URL="https://api.prowler.com"
$env:PROWLER_MCP_MODE="http"
$env:PROWLER_MCP_TRANSPORT_MODE="http"
```
</CodeGroup>
@@ -209,7 +209,7 @@ For convenience, create a `.env` file in the `mcp_server` directory:
```bash .env
PROWLER_APP_API_KEY=pk_your_api_key_here
PROWLER_API_BASE_URL=https://api.prowler.com
PROWLER_MCP_MODE=stdio
PROWLER_MCP_TRANSPORT_MODE=stdio
```
When using Docker, pass the environment file:
@@ -228,6 +228,35 @@ uvx /path/to/prowler/mcp_server/
This is particularly useful when configuring MCP clients that need to launch the server from a specific path.
## Production Deployment
For production deployments that require customization, it is recommended to use the ASGI application that can be found in `prowler_mcp_server.server`. This can be run with uvicorn:
```bash
uvicorn prowler_mcp_server.server:app --host 0.0.0.0 --port 8000
```
For more details on production deployment options, see the [FastMCP production deployment guide](https://gofastmcp.com/deployment/http#production-deployment) and [uvicorn settings](https://www.uvicorn.org/settings/).
### Entrypoint Script
The source tree includes `entrypoint.sh` to simplify switching between the
standard CLI runner and the ASGI app. The first argument selects the mode and
any additional flags are passed straight through:
```bash
# Default CLI experience (prowler-mcp console script)
./entrypoint.sh main --transport http --host 0.0.0.0
# ASGI app via uvicorn
./entrypoint.sh uvicorn --host 0.0.0.0 --port 9000
```
Omitting the mode defaults to `main`, matching the `prowler-mcp` console script.
When `uvicorn` mode is selected, the script exports `PROWLER_MCP_TRANSPORT_MODE=http` automatically.
This is the default entrypoint for the Docker container.
## Next Steps
Now that you have the Prowler MCP Server installed, proceed to configure your MCP client:
+1 -1
View File
@@ -16,7 +16,7 @@ We use encryption everywhere possible. The data and communications used by **Pro
Prowler Cloud is GDPR compliant in regards to personal data and the ["right to be forgotten"](https://gdpr.eu/right-to-be-forgotten/). When a user deletes their account their user information will be deleted from Prowler Cloud online and backup systems within 10 calendar days.
## Software Security
## Software Security
We follow a **security-by-design approach** throughout our software development lifecycle. All changes go through automated checks at every stage, from local development to production deployment.
+340
View File
@@ -0,0 +1,340 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "cell-0",
"metadata": {},
"source": [
"# Prowler API Examples"
]
},
{
"cell_type": "markdown",
"id": "cell-1",
"metadata": {},
"source": [
"## 1. Create a New API Key\n",
"Follow the instructions in the [Prowler documentation](https://docs.prowler.com/user-guide/providers/prowler-app-api-keys#creating-api-keys) to create a new API key."
]
},
{
"cell_type": "markdown",
"id": "cell-2",
"metadata": {},
"source": [
"## 2. Set the API Key\n",
"\n",
"Create a `.envrc` file in this directory with your API key:\n",
"```bash\n",
"export PROWLER_API_KEY=your-api-key-here\n",
"```\n",
"\n",
"Then load the environment variable:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cell-3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key loaded from .envrc file\n"
]
}
],
"source": [
"%%bash\n",
"source .envrc\n",
"echo \"API key loaded from .envrc file\""
]
},
{
"cell_type": "markdown",
"id": "cell-4",
"metadata": {},
"source": [
"## 3. Use the API Key"
]
},
{
"cell_type": "markdown",
"id": "cell-5",
"metadata": {},
"source": [
"### List Providers"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cell-6",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 1398 100 1398 0 0 2723 0 --:--:-- --:--:-- --:--:-- 2719\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"links\": {\n",
" \"first\": \"https://api.dev.prowler.com/api/v1/providers?page%5Bnumber%5D=1\",\n",
" \"last\": \"https://api.dev.prowler.com/api/v1/providers?page%5Bnumber%5D=1\",\n",
" \"next\": null,\n",
" \"prev\": null\n",
" },\n",
" \"data\": [\n",
" {\n",
" \"type\": \"providers\",\n",
" \"id\": \"46a9435b-a2f6-44ee-b267-710f8a403ace\",\n",
" \"attributes\": {\n",
" \"inserted_at\": \"2025-10-22T07:46:12.089159Z\",\n",
" \"updated_at\": \"2025-10-22T08:00:04.923849Z\",\n",
" \"provider\": \"aws\",\n",
" \"uid\": \"552455647653\",\n",
" \"alias\": \"Prowler Demo\",\n",
" \"connection\": {\n",
" \"connected\": true,\n",
" \"last_checked_at\": \"2025-10-22T08:00:04.918455Z\"\n",
" }\n",
" },\n",
" \"relationships\": {\n",
" \"secret\": {\n",
" \"data\": {\n",
" \"type\": \"provider-secrets\",\n",
" \"id\": \"49fafccd-348f-47c1-9147-3494903290ff\"\n",
" }\n",
" },\n",
" \"provider_groups\": {\n",
" \"meta\": {\n",
" \"count\": 0\n",
" },\n",
" \"data\": []\n",
" }\n",
" },\n",
" \"links\": {\n",
" \"self\": \"https://api.dev.prowler.com/api/v1/providers/46a9435b-a2f6-44ee-b267-710f8a403ace\"\n",
" }\n",
" },\n",
" {\n",
" \"type\": \"providers\",\n",
" \"id\": \"9e499bb1-03fd-4588-b58a-75ef71334040\",\n",
" \"attributes\": {\n",
" \"inserted_at\": \"2025-10-22T07:42:53.856378Z\",\n",
" \"updated_at\": \"2025-10-22T07:46:15.197305Z\",\n",
" \"provider\": \"github\",\n",
" \"uid\": \"PieceOfM1nd\",\n",
" \"alias\": \"PieceOfM1nd (org)\",\n",
" \"connection\": {\n",
" \"connected\": true,\n",
" \"last_checked_at\": \"2025-10-22T07:46:15.191930Z\"\n",
" }\n",
" },\n",
" \"relationships\": {\n",
" \"secret\": {\n",
" \"data\": {\n",
" \"type\": \"provider-secrets\",\n",
" \"id\": \"3aa2c451-34e3-48c4-a0d6-db532181c892\"\n",
" }\n",
" },\n",
" \"provider_groups\": {\n",
" \"meta\": {\n",
" \"count\": 0\n",
" },\n",
" \"data\": []\n",
" }\n",
" },\n",
" \"links\": {\n",
" \"self\": \"https://api.dev.prowler.com/api/v1/providers/9e499bb1-03fd-4588-b58a-75ef71334040\"\n",
" }\n",
" }\n",
" ],\n",
" \"meta\": {\n",
" \"pagination\": {\n",
" \"page\": 1,\n",
" \"pages\": 1,\n",
" \"count\": 2\n",
" },\n",
" \"version\": \"v1\"\n",
" }\n",
"}\n"
]
}
],
"source": [
"%%bash\n",
"source .envrc\n",
"\n",
"curl -X GET 'https://api.dev.prowler.com/api/v1/providers' \\\n",
" -H \"Authorization: Api-Key ${PROWLER_API_KEY}\" \\\n",
" -H 'Content-Type: application/vnd.api+json' \\\n",
" | jq ."
]
},
{
"cell_type": "markdown",
"id": "flvlv96i2r7",
"metadata": {},
"source": [
"### Get One Critical Finding from Latest Scan"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "xkmbmm7rywb",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 2465 100 2465 0 0 4558 0 --:--:-- --:--:-- --:--:-- 4556\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"links\": {\n",
" \"first\": \"https://api.dev.prowler.com/api/v1/findings/latest?filter%5Bseverity__in%5D=critical&page%5Bnumber%5D=1&page%5Bsize%5D=1\",\n",
" \"last\": \"https://api.dev.prowler.com/api/v1/findings/latest?filter%5Bseverity__in%5D=critical&page%5Bnumber%5D=138&page%5Bsize%5D=1\",\n",
" \"next\": \"https://api.dev.prowler.com/api/v1/findings/latest?filter%5Bseverity__in%5D=critical&page%5Bnumber%5D=2&page%5Bsize%5D=1\",\n",
" \"prev\": null\n",
" },\n",
" \"data\": [\n",
" {\n",
" \"type\": \"findings\",\n",
" \"id\": \"019a0af5-e65a-7735-8223-e9aa5ab4c58e\",\n",
" \"attributes\": {\n",
" \"uid\": \"prowler-aws-ssm_document_secrets-552455647653-us-east-1-SSM-SessionManagerRunShell\",\n",
" \"delta\": \"new\",\n",
" \"status\": \"PASS\",\n",
" \"status_extended\": \"No secrets found in SSM Document SSM-SessionManagerRunShell.\",\n",
" \"severity\": \"critical\",\n",
" \"check_id\": \"ssm_document_secrets\",\n",
" \"check_metadata\": {\n",
" \"risk\": \"Secrets hardcoded into SSM Documents by malware and bad actors to gain lateral access to other services.\",\n",
" \"notes\": \"\",\n",
" \"checkid\": \"ssm_document_secrets\",\n",
" \"provider\": \"aws\",\n",
" \"severity\": \"critical\",\n",
" \"checktype\": [],\n",
" \"dependson\": [],\n",
" \"relatedto\": [],\n",
" \"categories\": [\n",
" \"secrets\"\n",
" ],\n",
" \"checktitle\": \"Find secrets in SSM Documents.\",\n",
" \"compliance\": [],\n",
" \"relatedurl\": \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html\",\n",
" \"description\": \"Find secrets in SSM Documents.\",\n",
" \"remediation\": {\n",
" \"code\": {\n",
" \"cli\": \"\",\n",
" \"other\": \"\",\n",
" \"nativeiac\": \"\",\n",
" \"terraform\": \"\"\n",
" },\n",
" \"recommendation\": {\n",
" \"url\": \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html\",\n",
" \"text\": \"Implement automated detective control (e.g. using tools like Prowler) to scan accounts for passwords and secrets. Use Secrets Manager service to store and retrieve passwords and secrets.\"\n",
" }\n",
" },\n",
" \"servicename\": \"ssm\",\n",
" \"checkaliases\": [],\n",
" \"resourcetype\": \"AwsSsmDocument\",\n",
" \"additionalurls\": [],\n",
" \"subservicename\": \"\",\n",
" \"resourceidtemplate\": \"arn:aws:ssm:region:account-id:document/document-name\"\n",
" },\n",
" \"raw_result\": {},\n",
" \"inserted_at\": \"2025-10-22T08:07:58.811165Z\",\n",
" \"updated_at\": \"2025-10-22T08:07:58.818764Z\",\n",
" \"first_seen_at\": \"2025-10-22T08:07:58.808707Z\",\n",
" \"muted\": false,\n",
" \"muted_reason\": null\n",
" },\n",
" \"relationships\": {\n",
" \"scan\": {\n",
" \"data\": {\n",
" \"type\": \"scans\",\n",
" \"id\": \"019a0aec-edbc-7815-8476-b4efb7a4059a\"\n",
" }\n",
" },\n",
" \"resources\": {\n",
" \"meta\": {\n",
" \"count\": 1\n",
" },\n",
" \"data\": [\n",
" {\n",
" \"type\": \"resources\",\n",
" \"id\": \"f1f89f3d-22f8-43f6-a912-4edb23a7522a\"\n",
" }\n",
" ]\n",
" }\n",
" },\n",
" \"links\": {\n",
" \"self\": \"https://api.dev.prowler.com/api/v1/findings/019a0af5-e65a-7735-8223-e9aa5ab4c58e\"\n",
" }\n",
" }\n",
" ],\n",
" \"meta\": {\n",
" \"pagination\": {\n",
" \"page\": 1,\n",
" \"pages\": 138,\n",
" \"count\": 138\n",
" },\n",
" \"version\": \"v1\"\n",
" }\n",
"}\n"
]
}
],
"source": [
"%%bash\n",
"source .envrc\n",
"\n",
"curl -X GET 'https://api.dev.prowler.com/api/v1/findings/latest?filter%5Bseverity__in%5D=critical&page%5Bsize%5D=1' \\\n",
" -H \"Authorization: Api-Key ${PROWLER_API_KEY}\" \\\n",
" -H 'Content-Type: application/vnd.api+json' \\\n",
" | jq ."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "prowler-6nYzd6ct-py3.12",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
+317
View File
@@ -0,0 +1,317 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Prowler API Examples"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. Create a New API key\n",
"Follow the instructions in the [Prowler documentation](https://docs.prowler.com/user-guide/providers/prowler-app-api-keys#creating-api-keys) to create a new API key."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Set the API key\n",
"\n",
"You can set your API key as an environment variable like this:\n",
"```bash\n",
"export PROWLER_API_KEY=<your-api-key>\n",
"```\n",
"\n",
"But, for security reasons, in this notebook we will use the `getpass` library to ask for the API key."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"\n",
"prowler_api_key = getpass.getpass(\"Enter your Prowler API key: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3. Use the API key"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### List Providers"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"links\": {\n",
" \"first\": \"https://api.dev.prowler.com/api/v1/providers?page%5Bnumber%5D=1\",\n",
" \"last\": \"https://api.dev.prowler.com/api/v1/providers?page%5Bnumber%5D=1\",\n",
" \"next\": null,\n",
" \"prev\": null\n",
" },\n",
" \"data\": [\n",
" {\n",
" \"type\": \"providers\",\n",
" \"id\": \"46a9435b-a2f6-44ee-b267-710f8a403ace\",\n",
" \"attributes\": {\n",
" \"inserted_at\": \"2025-10-22T07:46:12.089159Z\",\n",
" \"updated_at\": \"2025-10-22T08:00:04.923849Z\",\n",
" \"provider\": \"aws\",\n",
" \"uid\": \"552455647653\",\n",
" \"alias\": \"Prowler Demo\",\n",
" \"connection\": {\n",
" \"connected\": true,\n",
" \"last_checked_at\": \"2025-10-22T08:00:04.918455Z\"\n",
" }\n",
" },\n",
" \"relationships\": {\n",
" \"secret\": {\n",
" \"data\": {\n",
" \"type\": \"provider-secrets\",\n",
" \"id\": \"49fafccd-348f-47c1-9147-3494903290ff\"\n",
" }\n",
" },\n",
" \"provider_groups\": {\n",
" \"meta\": {\n",
" \"count\": 0\n",
" },\n",
" \"data\": []\n",
" }\n",
" },\n",
" \"links\": {\n",
" \"self\": \"https://api.dev.prowler.com/api/v1/providers/46a9435b-a2f6-44ee-b267-710f8a403ace\"\n",
" }\n",
" },\n",
" {\n",
" \"type\": \"providers\",\n",
" \"id\": \"9e499bb1-03fd-4588-b58a-75ef71334040\",\n",
" \"attributes\": {\n",
" \"inserted_at\": \"2025-10-22T07:42:53.856378Z\",\n",
" \"updated_at\": \"2025-10-22T07:46:15.197305Z\",\n",
" \"provider\": \"github\",\n",
" \"uid\": \"PieceOfM1nd\",\n",
" \"alias\": \"PieceOfM1nd (org)\",\n",
" \"connection\": {\n",
" \"connected\": true,\n",
" \"last_checked_at\": \"2025-10-22T07:46:15.191930Z\"\n",
" }\n",
" },\n",
" \"relationships\": {\n",
" \"secret\": {\n",
" \"data\": {\n",
" \"type\": \"provider-secrets\",\n",
" \"id\": \"3aa2c451-34e3-48c4-a0d6-db532181c892\"\n",
" }\n",
" },\n",
" \"provider_groups\": {\n",
" \"meta\": {\n",
" \"count\": 0\n",
" },\n",
" \"data\": []\n",
" }\n",
" },\n",
" \"links\": {\n",
" \"self\": \"https://api.dev.prowler.com/api/v1/providers/9e499bb1-03fd-4588-b58a-75ef71334040\"\n",
" }\n",
" }\n",
" ],\n",
" \"meta\": {\n",
" \"pagination\": {\n",
" \"page\": 1,\n",
" \"pages\": 1,\n",
" \"count\": 2\n",
" },\n",
" \"version\": \"v1\"\n",
" }\n",
"}\n"
]
}
],
"source": [
"import requests\n",
"import os\n",
"import json\n",
"\n",
"headers = {\n",
" 'Authorization': f'Api-Key {prowler_api_key}',\n",
" 'Content-Type': 'application/vnd.api+json'\n",
"}\n",
"\n",
"response = requests.get(\n",
" 'https://api.dev.prowler.com/api/v1/providers',\n",
" headers=headers\n",
")\n",
"\n",
"# Pretty-print the JSON response\n",
"print(json.dumps(response.json(), indent=2))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get One Critical Finding from Latest Scan"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"links\": {\n",
" \"first\": \"https://api.dev.prowler.com/api/v1/findings/latest?filter%5Bseverity__in%5D=critical&page%5Bnumber%5D=1&page%5Bsize%5D=1\",\n",
" \"last\": \"https://api.dev.prowler.com/api/v1/findings/latest?filter%5Bseverity__in%5D=critical&page%5Bnumber%5D=138&page%5Bsize%5D=1\",\n",
" \"next\": \"https://api.dev.prowler.com/api/v1/findings/latest?filter%5Bseverity__in%5D=critical&page%5Bnumber%5D=2&page%5Bsize%5D=1\",\n",
" \"prev\": null\n",
" },\n",
" \"data\": [\n",
" {\n",
" \"type\": \"findings\",\n",
" \"id\": \"019a0af5-e65a-7735-8223-e9aa5ab4c58e\",\n",
" \"attributes\": {\n",
" \"uid\": \"prowler-aws-ssm_document_secrets-552455647653-us-east-1-SSM-SessionManagerRunShell\",\n",
" \"delta\": \"new\",\n",
" \"status\": \"PASS\",\n",
" \"status_extended\": \"No secrets found in SSM Document SSM-SessionManagerRunShell.\",\n",
" \"severity\": \"critical\",\n",
" \"check_id\": \"ssm_document_secrets\",\n",
" \"check_metadata\": {\n",
" \"risk\": \"Secrets hardcoded into SSM Documents by malware and bad actors to gain lateral access to other services.\",\n",
" \"notes\": \"\",\n",
" \"checkid\": \"ssm_document_secrets\",\n",
" \"provider\": \"aws\",\n",
" \"severity\": \"critical\",\n",
" \"checktype\": [],\n",
" \"dependson\": [],\n",
" \"relatedto\": [],\n",
" \"categories\": [\n",
" \"secrets\"\n",
" ],\n",
" \"checktitle\": \"Find secrets in SSM Documents.\",\n",
" \"compliance\": [],\n",
" \"relatedurl\": \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html\",\n",
" \"description\": \"Find secrets in SSM Documents.\",\n",
" \"remediation\": {\n",
" \"code\": {\n",
" \"cli\": \"\",\n",
" \"other\": \"\",\n",
" \"nativeiac\": \"\",\n",
" \"terraform\": \"\"\n",
" },\n",
" \"recommendation\": {\n",
" \"url\": \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html\",\n",
" \"text\": \"Implement automated detective control (e.g. using tools like Prowler) to scan accounts for passwords and secrets. Use Secrets Manager service to store and retrieve passwords and secrets.\"\n",
" }\n",
" },\n",
" \"servicename\": \"ssm\",\n",
" \"checkaliases\": [],\n",
" \"resourcetype\": \"AwsSsmDocument\",\n",
" \"additionalurls\": [],\n",
" \"subservicename\": \"\",\n",
" \"resourceidtemplate\": \"arn:aws:ssm:region:account-id:document/document-name\"\n",
" },\n",
" \"raw_result\": {},\n",
" \"inserted_at\": \"2025-10-22T08:07:58.811165Z\",\n",
" \"updated_at\": \"2025-10-22T08:07:58.818764Z\",\n",
" \"first_seen_at\": \"2025-10-22T08:07:58.808707Z\",\n",
" \"muted\": false,\n",
" \"muted_reason\": null\n",
" },\n",
" \"relationships\": {\n",
" \"scan\": {\n",
" \"data\": {\n",
" \"type\": \"scans\",\n",
" \"id\": \"019a0aec-edbc-7815-8476-b4efb7a4059a\"\n",
" }\n",
" },\n",
" \"resources\": {\n",
" \"meta\": {\n",
" \"count\": 1\n",
" },\n",
" \"data\": [\n",
" {\n",
" \"type\": \"resources\",\n",
" \"id\": \"f1f89f3d-22f8-43f6-a912-4edb23a7522a\"\n",
" }\n",
" ]\n",
" }\n",
" },\n",
" \"links\": {\n",
" \"self\": \"https://api.dev.prowler.com/api/v1/findings/019a0af5-e65a-7735-8223-e9aa5ab4c58e\"\n",
" }\n",
" }\n",
" ],\n",
" \"meta\": {\n",
" \"pagination\": {\n",
" \"page\": 1,\n",
" \"pages\": 138,\n",
" \"count\": 138\n",
" },\n",
" \"version\": \"v1\"\n",
" }\n",
"}\n"
]
}
],
"source": [
"response = requests.get(\n",
" 'https://api.dev.prowler.com/api/v1/findings/latest',\n",
" headers=headers,\n",
" params={\n",
" 'filter[severity__in]': 'critical',\n",
" 'page[size]': '1'\n",
" }\n",
")\n",
"\n",
"print(json.dumps(response.json(), indent=2))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "prowler-6nYzd6ct-py3.12",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.11"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
+1 -1
View File
@@ -1,3 +1,3 @@
PROWLER_APP_API_KEY="pk_your_api_key_here"
PROWLER_API_BASE_URL="https://api.prowler.com"
PROWLER_MCP_MODE="stdio"
PROWLER_MCP_TRANSPORT_MODE="stdio"
+2 -1
View File
@@ -13,4 +13,5 @@ All notable changes to the **Prowler MCP Server** are documented in this file.
- Add new MCP Server for Prowler Documentation [(#8795)](https://github.com/prowler-cloud/prowler/pull/8795)
- API key support for STDIO mode and enhanced HTTP mode authentication [(#8823)](https://github.com/prowler-cloud/prowler/pull/8823)
- Add health check endpoint [(#8905)](https://github.com/prowler-cloud/prowler/pull/8905)
- Update Prowler Documentation MCP Server to use Mintlify API [(#8915)](https://github.com/prowler-cloud/prowler/pull/8915)
- Update Prowler Documentation MCP Server to use Mintlify API [(#8916)](https://github.com/prowler-cloud/prowler/pull/8916)
- Add custom production deployment using uvicorn [(#8958)](https://github.com/prowler-cloud/prowler/pull/8958)
+6 -7
View File
@@ -47,13 +47,12 @@ COPY --from=builder --chown=prowler /app/prowler_mcp_server /app/prowler_mcp_ser
# 3. Project metadata file (may be needed by some packages at runtime)
COPY --from=builder --chown=prowler /app/pyproject.toml /app/pyproject.toml
# 4. Entrypoint helper script for selecting runtime mode
COPY --from=builder --chown=prowler /app/entrypoint.sh /app/entrypoint.sh
# Add virtual environment to PATH so prowler-mcp command is available
ENV PATH="/app/.venv/bin:$PATH"
# Entry point for the MCP server
# Default to stdio mode, but allow overriding via command arguments
# Examples:
# docker run -p 8000:8000 prowler-mcp --transport http --host 0.0.0.0 --port 8000
# docker run prowler-mcp --transport stdio
ENTRYPOINT ["prowler-mcp"]
CMD ["--transport", "stdio"]
# Entrypoint wrapper defaults to CLI mode; override with `uvicorn` to run ASGI app
ENTRYPOINT ["/app/entrypoint.sh"]
CMD ["main"]
+17 -3
View File
@@ -144,11 +144,11 @@ uv run prowler-mcp --transport http
uv run prowler-mcp --transport http --host 0.0.0.0 --port 8080
```
For self-deployed MCP remote server, you can use also configure the server to use a custom API base URL with the environment variable `PROWLER_API_BASE_URL`; and the transport mode with the environment variable `PROWLER_MCP_MODE`.
For self-deployed MCP remote server, you can use also configure the server to use a custom API base URL with the environment variable `PROWLER_API_BASE_URL`; and the transport mode with the environment variable `PROWLER_MCP_TRANSPORT_MODE`.
```bash
export PROWLER_API_BASE_URL="https://api.prowler.com"
export PROWLER_MCP_MODE="http"
export PROWLER_MCP_TRANSPORT_MODE="http"
```
### Using uv directly
@@ -190,6 +190,16 @@ docker run --rm --env-file ./.env -p 8000:8000 -it prowler-mcp --transport http
docker run --rm --env-file ./.env -p 8080:8080 -it prowler-mcp --transport http --host 0.0.0.0 --port 8080
```
## Production Deployment
For production deployments that require customization, it is recommended to use the ASGI application that can be found in `prowler_mcp_server.server`. This can be run with uvicorn:
```bash
uvicorn prowler_mcp_server.server:app --host 0.0.0.0 --port 8000
```
For more details on production deployment options, see the [FastMCP production deployment guide](https://gofastmcp.com/deployment/http#production-deployment) and [uvicorn settings](https://www.uvicorn.org/settings/).
## Command Line Arguments
The Prowler MCP server supports the following command line arguments:
@@ -482,6 +492,10 @@ If you want to have it globally available, add the example server to Cursor's co
If you want to have it only for the current project, add the example server to the project's root in a new `.cursor/mcp.json` file.
## Documentation
For detailed documentation about the Prowler MCP Server, including guides, tutorials, and use cases, visit the [official Prowler documentation](https://docs.prowler.com).
## License
This project follows the repositorys main license. See the [LICENSE](../LICENSE) file at the repository root.
This project follows the repository's main license. See the [LICENSE](../LICENSE) file at the repository root.
+50
View File
@@ -0,0 +1,50 @@
#!/bin/sh
set -eu
usage() {
cat <<'EOF'
Usage: ./entrypoint.sh [main|uvicorn] [args...]
Modes:
main (default) Run prowler-mcp
uvicorn Run uvicorn prowler_mcp_server.server:app
All additional arguments are forwarded to the selected command.
EOF
}
mode="main"
if [ "$#" -gt 0 ]; then
case "$1" in
main|cli)
mode="main"
shift
;;
uvicorn|asgi)
mode="uvicorn"
shift
;;
-h|--help)
usage
exit 0
;;
*)
mode="main"
;;
esac
fi
case "$mode" in
main)
exec prowler-mcp "$@"
;;
uvicorn)
export PROWLER_MCP_TRANSPORT_MODE="http"
exec uvicorn prowler_mcp_server.server:app "$@"
;;
*)
usage
exit 1
;;
esac
+18 -7
View File
@@ -1,10 +1,8 @@
import argparse
import asyncio
import os
import sys
from prowler_mcp_server.lib.logger import logger
from prowler_mcp_server.server import setup_main_server
def parse_arguments():
@@ -13,7 +11,7 @@ def parse_arguments():
parser.add_argument(
"--transport",
choices=["stdio", "http"],
default=os.getenv("PROWLER_MCP_MODE", "stdio"),
default=None,
help="Transport method (default: stdio)",
)
parser.add_argument(
@@ -35,13 +33,26 @@ def main():
try:
args = parse_arguments()
# Set up server with configuration
prowler_mcp_server = asyncio.run(setup_main_server(transport=args.transport))
print(f"args.transport: {args.transport}")
if args.transport is None:
args.transport = os.getenv("PROWLER_MCP_TRANSPORT_MODE", "stdio")
else:
os.environ["PROWLER_MCP_TRANSPORT_MODE"] = args.transport
from prowler_mcp_server.server import prowler_mcp_server
if args.transport == "stdio":
prowler_mcp_server.run(transport="stdio")
prowler_mcp_server.run(transport=args.transport, show_banner=False)
elif args.transport == "http":
prowler_mcp_server.run(transport="http", host=args.host, port=args.port)
prowler_mcp_server.run(
transport=args.transport,
host=args.host,
port=args.port,
show_banner=False,
)
else:
logger.error(f"Invalid transport: {args.transport}")
except KeyboardInterrupt:
logger.info("Shutting down Prowler MCP server...")
@@ -14,7 +14,7 @@ class ProwlerAppAuth:
def __init__(
self,
mode: str = os.getenv("PROWLER_MCP_MODE", "stdio"),
mode: str = os.getenv("PROWLER_MCP_TRANSPORT_MODE", "stdio"),
base_url: str = os.getenv("PROWLER_API_BASE_URL", "https://api.prowler.com"),
):
self.base_url = base_url.rstrip("/")
@@ -33,7 +33,14 @@ class ProwlerAppAuth:
raise ValueError("Prowler App API key format is incorrect")
def _parse_jwt(self, token: str) -> Optional[Dict]:
"""Parse JWT token and return payload, similar to JS parseJwt function."""
"""Parse JWT token and return payload
Args:
token: JWT token to parse
Returns:
Parsed JWT payload, or None if parsing fails
"""
if not token:
return None
+26 -13
View File
@@ -1,16 +1,16 @@
import asyncio
import os
from fastmcp import FastMCP
from prowler_mcp_server import __version__
from prowler_mcp_server.lib.logger import logger
from starlette.responses import JSONResponse
prowler_mcp_server = FastMCP("prowler-mcp-server")
async def setup_main_server(transport: str) -> FastMCP:
async def setup_main_server():
"""Set up the main Prowler MCP server with all available integrations."""
# Initialize main Prowler MCP server
prowler_mcp_server = FastMCP("prowler-mcp-server")
# Import Prowler Hub tools with prowler_hub_ prefix
try:
logger.info("Importing Prowler Hub server...")
@@ -21,12 +21,10 @@ async def setup_main_server(transport: str) -> FastMCP:
except Exception as e:
logger.error(f"Failed to import Prowler Hub server: {e}")
# Import Prowler App tools with prowler_app_ prefix
try:
logger.info("Importing Prowler App server...")
if os.getenv("PROWLER_MCP_MODE", None) is None:
os.environ["PROWLER_MCP_MODE"] = transport
if not os.path.exists(
os.path.join(os.path.dirname(__file__), "prowler_app", "server.py")
):
@@ -44,6 +42,7 @@ async def setup_main_server(transport: str) -> FastMCP:
except Exception as e:
logger.error(f"Failed to import Prowler App server: {e}")
# Import Prowler Documentation tools with prowler_docs_ prefix
try:
logger.info("Importing Prowler Documentation server...")
from prowler_mcp_server.prowler_documentation.server import docs_mcp_server
@@ -53,9 +52,23 @@ async def setup_main_server(transport: str) -> FastMCP:
except Exception as e:
logger.error(f"Failed to import Prowler Documentation server: {e}")
# Add health check endpoint
@prowler_mcp_server.custom_route("/health", methods=["GET"])
async def health_check(request):
return JSONResponse({"status": "healthy", "service": "prowler-mcp-server"})
return prowler_mcp_server
# Add health check endpoint
@prowler_mcp_server.custom_route("/health", methods=["GET"])
async def health_check(request) -> JSONResponse:
"""Health check endpoint."""
return JSONResponse(
{"status": "healthy", "service": "prowler-mcp-server", "version": __version__}
)
# Get or create the event loop
try:
loop = asyncio.get_running_loop()
# If we have a running loop, schedule the setup as a task
loop.create_task(setup_main_server())
except RuntimeError:
# No running loop, use asyncio.run (for standalone execution)
asyncio.run(setup_main_server())
app = prowler_mcp_server.http_app()
+3
View File
@@ -32,10 +32,13 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Update AWS AppStream service metadata to new format [(#8789)](https://github.com/prowler-cloud/prowler/pull/8789)
- Update AWS API Gateway service metadata to new format [(#8788)](https://github.com/prowler-cloud/prowler/pull/8788)
- Update AWS Athena service metadata to new format [(#8790)](https://github.com/prowler-cloud/prowler/pull/8790)
- Update AWS CloudTrail service metadata to new format [(#8831)](https://github.com/prowler-cloud/prowler/pull/8831)
- Update AWS Auto Scaling service metadata to new format [(#8824)](https://github.com/prowler-cloud/prowler/pull/8824)
- Update AWS Backup service metadata to new format [(#8826)](https://github.com/prowler-cloud/prowler/pull/8826)
- Update AWS CloudFormation service metadata to new format [(#8828)](https://github.com/prowler-cloud/prowler/pull/8828)
- Update AWS Lambda service metadata to new format [(#8825)](https://github.com/prowler-cloud/prowler/pull/8825)
- Update AWS DLM service metadata to new format [(#8860)](https://github.com/prowler-cloud/prowler/pull/8860)
- Update AWS DMS service metadata to new format [(#8861)](https://github.com/prowler-cloud/prowler/pull/8861)
- Update AWS Directory Service service metadata to new format [(#8859)](https://github.com/prowler-cloud/prowler/pull/8859)
- Update AWS CloudFront service metadata to new format [(#8829)](https://github.com/prowler-cloud/prowler/pull/8829)
- Deprecate user authentication for M365 provider [(#8865)](https://github.com/prowler-cloud/prowler/pull/8865)
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -819,18 +819,6 @@
"aws-us-gov": []
}
},
"apptest": {
"regions": {
"aws": [
"ap-southeast-2",
"eu-central-1",
"sa-east-1",
"us-east-1"
],
"aws-cn": [],
"aws-us-gov": []
}
},
"aps": {
"regions": {
"aws": [
@@ -8723,6 +8711,7 @@
"ap-southeast-5",
"ca-central-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
"eu-south-2",
"eu-west-1",
@@ -9207,11 +9196,13 @@
"ap-east-2",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
"ap-south-1",
"ap-south-2",
"ap-southeast-1",
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"eu-central-1",
@@ -12436,7 +12427,12 @@
"workspaces-instances": {
"regions": {
"aws": [
"ap-northeast-2"
"ap-east-1",
"ap-northeast-2",
"ap-southeast-5",
"eu-south-2",
"me-central-1",
"us-east-2"
],
"aws-cn": [],
"aws-us-gov": []
@@ -1,33 +1,38 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_bucket_requires_mfa_delete",
"CheckTitle": "Ensure the S3 bucket CloudTrail bucket requires MFA delete",
"CheckTitle": "CloudTrail trail S3 bucket has MFA delete enabled",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards",
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure the S3 bucket CloudTrail bucket requires MFA",
"Risk": "If the S3 bucket CloudTrail bucket does not require MFA, it can be deleted by an attacker.",
"Description": "**CloudTrail log buckets** for actively logging trails are evaluated for **MFA Delete** on the associated S3 bucket. The assessment determines whether `MFA Delete` is configured on the in-account log bucket; *if the bucket resides in another account, its configuration should be verified separately*.",
"Risk": "Without **MFA Delete**, stolen or over-privileged credentials can permanently delete log versions or change versioning, compromising log **integrity** and **availability**. This enables attacker cover-ups, hinders **forensics**, and weakens evidence for investigations.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiFactorAuthenticationDelete.html",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudTrail/cloudtrail-bucket-mfa-delete-enabled.html"
],
"Remediation": {
"Code": {
"CLI": "aws s3api put-bucket-versioning --bucket DOC-EXAMPLE-BUCKET1 --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa \"SERIAL 123456\"",
"CLI": "aws s3api put-bucket-versioning --bucket <CLOUDTRAIL_BUCKET_NAME> --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa \"<MFA_SERIAL> <MFA_CODE>\"",
"NativeIaC": "",
"Other": "",
"Other": "1. Sign in to the AWS Management Console as the root user with MFA enabled\n2. Open AWS CloudShell (from the top navigation bar)\n3. Run:\n ```bash\n aws s3api put-bucket-versioning --bucket <CLOUDTRAIL_BUCKET_NAME> --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa \"<MFA_SERIAL> <MFA_CODE>\"\n ```",
"Terraform": ""
},
"Recommendation": {
"Text": "Configure MFA Delete for the S3 bucket CloudTrail bucket",
"Url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiFactorAuthenticationDelete.html"
"Text": "Enable `MFA Delete` on the CloudTrail log bucket with versioning enabled. Enforce **least privilege** so only tightly controlled identities can delete or alter logs, and require MFA for such actions. Apply **defense in depth** using a dedicated logging account and log file integrity validation.",
"Url": "https://hub.prowler.com/check/cloudtrail_bucket_requires_mfa_delete"
}
},
"Categories": [
"identity-access",
"forensics-ready"
],
"DependsOn": [],
@@ -1,35 +1,39 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_cloudwatch_logging_enabled",
"CheckTitle": "Ensure CloudTrail trails are integrated with CloudWatch Logs",
"CheckTitle": "CloudTrail trail has delivered logs to CloudWatch Logs in the last 24 hours",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards",
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "low",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure CloudTrail trails are integrated with CloudWatch Logs",
"Risk": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user, API, resource, and IP address, and provides opportunity to establish alarms and notifications for anomalous or sensitivity account activity.",
"Description": "**CloudTrail trails** are configured to send events to **CloudWatch Logs**, and show recent delivery within the last `24h`. Trails without integration or without recent CloudWatch delivery are identified, across single-Region and multi-Region trails.",
"Risk": "Missing or stale CloudWatch delivery weakens visibility and delays detection, impacting confidentiality and integrity. Adversaries can:\n- Hide **privilege escalation**\n- Perform unauthorized **resource changes**\n- Exfiltrate data via API misuse",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.prowler.com/checks/aws/logging-policies/logging_4#aws-console",
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html"
],
"Remediation": {
"Code": {
"CLI": "aws cloudtrail update-trail --name <trail_name> --cloudwatch-logs-log-group- arn <cloudtrail_log_group_arn> --cloudwatch-logs-role-arn <cloudtrail_cloudwatchLogs_role_arn>",
"NativeIaC": "",
"Other": "https://docs.prowler.com/checks/aws/logging-policies/logging_4#aws-console",
"Terraform": ""
"CLI": "aws cloudtrail update-trail --name <trail_name> --cloud-watch-logs-log-group-arn <cloudwatch_log_group_arn> --cloud-watch-logs-role-arn <cloudwatch_logs_role_arn>",
"NativeIaC": "```yaml\n# CloudFormation: enable CloudTrail delivery to CloudWatch Logs\nResources:\n <example_resource_name>:\n Type: AWS::CloudTrail::Trail\n Properties:\n S3BucketName: \"<example_resource_name>\"\n CloudWatchLogsLogGroupArn: \"<cloudwatch_log_group_arn>\" # CRITICAL: sends CloudTrail events to CloudWatch Logs\n CloudWatchLogsRoleArn: \"<cloudwatch_logs_role_arn>\" # CRITICAL: role CloudTrail assumes to deliver events\n```",
"Other": "1. In AWS Console, go to CloudTrail > Trails and select the trail\n2. In the CloudWatch Logs section, click Edit\n3. Set CloudWatch Logs to Enabled\n4. Choose an existing Log group (or create new) and select an IAM role with permissions for CreateLogStream/PutLogEvents\n5. Click Save changes\n6. After a few minutes, verify events appear in the chosen CloudWatch Logs log group",
"Terraform": "```hcl\n# Terraform: enable CloudTrail delivery to CloudWatch Logs\nresource \"aws_cloudtrail\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n s3_bucket_name = \"<example_resource_name>\"\n cloud_watch_logs_group_arn = \"<cloudwatch_log_group_arn>\" # CRITICAL: sends CloudTrail events to CloudWatch Logs\n cloud_watch_logs_role_arn = \"<cloudwatch_logs_role_arn>\" # CRITICAL: role CloudTrail assumes to deliver events\n}\n```"
},
"Recommendation": {
"Text": "Validate that the trails in CloudTrail have an arn set in the CloudWatchLogsLogGroupArn property.",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html"
"Text": "Integrate every trail with **CloudWatch Logs** and maintain continuous, near-real-time delivery. Enforce **least privilege** on the delivery role, prefer **multi-Region** coverage, and implement **metric filters and alerts** for sensitive actions. Centralize retention to support **defense in depth**.",
"Url": "https://hub.prowler.com/check/cloudtrail_cloudwatch_logging_enabled"
}
},
"Categories": [
"forensics-ready",
"logging"
"logging",
"forensics-ready"
],
"DependsOn": [],
"RelatedTo": [],
@@ -1,34 +1,39 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_insights_exist",
"CheckTitle": "Ensure CloudTrail Insight is enabled",
"CheckTitle": "CloudTrail trail has Insights enabled",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards",
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "low",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure CloudTrail Insight is enabled",
"Risk": "CloudTrail Insights provides a powerful way to search and analyze CloudTrail log data using pre-built queries and machine learning algorithms. This can help you to identify potential security threats and suspicious activity in near real-time, such as unauthorized access attempts, policy changes, or resource modifications.",
"Description": "**CloudTrail trails** that are logging are evaluated for **Insights** via `insight selectors`, which enable anomaly detection on management-event patterns (API call and error rates). The finding pinpoints logging trails where these selectors are missing.",
"Risk": "Without **Insights**, abnormal API call or error rates can go unnoticed, delaying detection of credential abuse, privilege escalation, or runaway automation. Attackers may rapidly alter policies, delete resources, or exfiltrate data before response, impacting confidentiality and availability.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-insights-events-with-cloudtrail.html",
"https://awscli.amazonaws.com/v2/documentation/api/2.18.18/reference/cloudtrail/put-insight-selectors.html",
"https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"CLI": "aws cloudtrail put-insight-selectors --trail-name <TRAIL_NAME> --insight-selectors '[{\"InsightType\":\"ApiCallRateInsight\"}]'",
"NativeIaC": "```yaml\nResources:\n <example_resource_name>:\n Type: AWS::CloudTrail::Trail\n Properties:\n TrailName: <example_resource_name>\n S3BucketName: <example_resource_name>\n IsLogging: true\n InsightSelectors:\n - InsightType: ApiCallRateInsight # Critical fix: enables CloudTrail Insights on the trail\n```",
"Other": "1. In the AWS Console, go to CloudTrail > Trails\n2. Select the trail that is logging\n3. Click Edit on the CloudTrail Insights section\n4. Enable Insights and select API call rate (or Error rate)\n5. Save changes",
"Terraform": "```hcl\nresource \"aws_cloudtrail\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n s3_bucket_name = \"<example_resource_name>\"\n enable_logging = true\n\n insight_selector {\n insight_type = \"ApiCallRateInsight\" # Critical fix: enables CloudTrail Insights on the trail\n }\n}\n```"
},
"Recommendation": {
"Text": "Enable CloudTrail Insight",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-insights-events-with-cloudtrail.html"
"Text": "Enable **CloudTrail Insights** on all logging trails (ideally all-Region or organization trails). Activate both `ApiCallRateInsight` and `ApiErrorRateInsight`. Integrate alerts with monitoring and review anomalies regularly. Apply **defense in depth** and least privilege to reduce potential blast radius.",
"Url": "https://hub.prowler.com/check/cloudtrail_insights_exist"
}
},
"Categories": [
"forensics-ready"
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],
@@ -1,34 +1,39 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_kms_encryption_enabled",
"CheckTitle": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs",
"CheckTitle": "CloudTrail trail logs are encrypted at rest with a KMS key",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards",
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs",
"Risk": "By default, the log files delivered by CloudTrail to your bucket are encrypted by Amazon server-side encryption with Amazon S3-managed encryption keys (SSE-S3). To provide a security layer that is directly manageable, you can instead use server-side encryption with AWS KMSmanaged keys (SSE-KMS) for your CloudTrail log files.",
"Description": "**AWS CloudTrail trails** are evaluated for use of **SSE-KMS** with a customer-managed KMS key to encrypt delivered log files at rest in S3. Trails without a configured KMS key are identified. *Applies to single-Region and multi-Region trails.*",
"Risk": "Absent a **customer-managed KMS key**, log protection relies only on storage permissions. Bucket misconfigurations or stolen credentials can expose audit data, aiding evasion and lateral movement. Missing key-level controls, rotation, and usage audit weaken **confidentiality** and **forensic integrity**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html",
"https://trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudTrail/cloudtrail-logs-encrypted.html",
"https://www.stream.security/rules/ensure-cloudtrail-logs-are-encrypted-at-rest",
"https://www.clouddefense.ai/compliance-rules/cis-v130/logging/cis-v130-3-7"
],
"Remediation": {
"Code": {
"CLI": "aws cloudtrail update-trail --name <trail_name> --kms-id <cloudtrail_kms_key> aws kms put-key-policy --key-id <cloudtrail_kms_key> --policy <cloudtrail_kms_key_policy>",
"NativeIaC": "https://docs.prowler.com/checks/aws/logging-policies/logging_7#fix---buildtime",
"Other": "",
"Terraform": ""
"CLI": "aws cloudtrail update-trail --name <trail_name> --kms-key-id <kms_key_arn_or_id>",
"NativeIaC": "```yaml\n# CloudFormation: enable KMS encryption for an existing/new CloudTrail\nResources:\n <example_resource_name>:\n Type: AWS::CloudTrail::Trail\n Properties:\n S3BucketName: <example_resource_name>\n KmsKeyId: <example_resource_id> # Critical: sets the KMS key to encrypt CloudTrail logs at rest\n```",
"Other": "1. In the AWS Console, go to CloudTrail > Trails\n2. Select the trail <trail_name>, click Edit\n3. Under Log file encryption, choose Use a KMS key and select <cloudtrail_kms_key>\n4. Click Save changes",
"Terraform": "```hcl\n# Enable KMS encryption for CloudTrail\nresource \"aws_cloudtrail\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n s3_bucket_name = \"<example_resource_name>\"\n kms_key_id = \"<example_resource_id>\" # Critical: uses this KMS key to encrypt CloudTrail logs\n}\n```"
},
"Recommendation": {
"Text": "This approach has the following advantages: You can create and manage the CMK encryption keys yourself. You can use a single CMK to encrypt and decrypt log files for multiple accounts across all regions. You have control over who can use your key for encrypting and decrypting CloudTrail log files. You can assign permissions for the key to the users. You have enhanced security.",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html"
"Text": "Enable **SSE-KMS** on every trail using a **customer-managed KMS key**. Apply **least privilege** so only authorized roles can `Decrypt`, and enforce **separation of duties** between key admins and log readers. Rotate keys and monitor key usage to provide **defense in depth** for CloudTrail data.",
"Url": "https://hub.prowler.com/check/cloudtrail_kms_encryption_enabled"
}
},
"Categories": [
"forensics-ready",
"encryption"
],
"DependsOn": [],
@@ -1,33 +1,40 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_log_file_validation_enabled",
"CheckTitle": "Ensure CloudTrail log file validation is enabled",
"CheckTitle": "CloudTrail trail has log file validation enabled",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards",
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure CloudTrail log file validation is enabled",
"Risk": "Enabling log file validation will provide additional integrity checking of CloudTrail logs. ",
"Description": "**AWS CloudTrail trails** are evaluated for **log file integrity validation** being enabled (`LogFileValidationEnabled`).\n\nWhen enabled, CloudTrail generates signed digest files to verify that S3-delivered log files remain unchanged.",
"Risk": "Without validation, adversaries can alter, forge, or delete audit entries without detection, compromising log **integrity** and non-repudiation.\n\nThis impairs investigations, enables alert evasion, and obscures unauthorized changes across regions or accounts.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-intro.html",
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudTrail/cloudtrail-log-file-integrity-validation.html",
"https://deepwiki.com/acantril/learn-cantrill-io-labs/7.1-cloudtrail-log-file-integrity"
],
"Remediation": {
"Code": {
"CLI": "aws cloudtrail update-trail --name <trail_name> --enable-log-file-validation",
"NativeIaC": "https://docs.prowler.com/checks/aws/logging-policies/logging_2#cloudformation",
"Other": "",
"Terraform": "https://docs.prowler.com/checks/aws/logging-policies/logging_2#terraform"
"CLI": "aws cloudtrail update-trail --name <trail_name> --enable-log-file-validation",
"NativeIaC": "```yaml\n# CloudFormation: Enable log file validation on a CloudTrail trail\nResources:\n <example_resource_name>:\n Type: AWS::CloudTrail::Trail\n Properties:\n S3BucketName: <example_resource_name>\n EnableLogFileValidation: true # Critical: enables integrity validation for delivered log files\n```",
"Other": "1. Open the AWS Console and go to CloudTrail\n2. Click Trails and select <trail_name>\n3. Click Edit\n4. In Additional/Advanced settings, check Enable log file validation\n5. Click Save changes",
"Terraform": "```hcl\n# Enable log file validation on a CloudTrail trail\nresource \"aws_cloudtrail\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n s3_bucket_name = \"<example_resource_name>\"\n enable_log_file_validation = true # Critical: ensures CloudTrail writes signed digests to detect tampering\n}\n```"
},
"Recommendation": {
"Text": "Ensure LogFileValidationEnabled is set to true for each trail.",
"Url": "http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-filevalidation-enabling.html"
"Text": "Enable **log file integrity validation** on all trails (`LogFileValidationEnabled=true`).\n\nEnforce **least privilege** on the logs bucket, retain and protect digest files (e.g., S3 Object Lock/MFA Delete), and monitor validation results to support **defense in depth**.",
"Url": "https://hub.prowler.com/check/cloudtrail_log_file_validation_enabled"
}
},
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],
@@ -1,33 +1,38 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_logs_s3_bucket_access_logging_enabled",
"CheckTitle": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket",
"CheckTitle": "CloudTrail trail destination S3 bucket has access logging enabled",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards",
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket",
"Risk": "Server access logs can assist you in security and access audits, help you learn about your customer base, and understand your Amazon S3 bill.",
"Description": "CloudTrail trails deliver logs to an S3 bucket; this evaluates whether that bucket has **S3 server access logging** enabled to record requests against it.\n\n*If the destination bucket is outside the account or audit scope, a manual review is indicated.*",
"Risk": "Without access logging on the CloudTrail logs bucket, access and changes to log files lack an independent audit trail. Attackers could read, delete, or replace logs without attribution, undermining **log confidentiality** and **integrity**, and slowing **incident response**.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/cloudtrail-controls.html",
"https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "https://docs.prowler.com/checks/aws/logging-policies/logging_6#aws-console",
"Terraform": ""
"CLI": "aws s3api put-bucket-logging --bucket <CLOUDTRAIL_BUCKET_NAME> --bucket-logging-status \"{\\\"LoggingEnabled\\\":{\\\"TargetBucket\\\":\\\"<TARGET_BUCKET_NAME>\\\"}}\"",
"NativeIaC": "```yaml\n# CloudFormation: enable S3 access logging on the CloudTrail destination bucket\nResources:\n <example_log_bucket_name>:\n Type: AWS::S3::Bucket\n\n <example_cloudtrail_bucket>:\n Type: AWS::S3::Bucket\n Properties:\n LoggingConfiguration:\n DestinationBucketName: !Ref <example_log_bucket_name> # Critical: turns on server access logging to this destination bucket\n # This enables access logging so the check passes\n```",
"Other": "1. In the AWS Console, go to S3 and open the bucket used by your CloudTrail trail\n2. Select the Properties tab\n3. In Server access logging, click Edit\n4. Enable logging and choose a different destination S3 bucket for the logs\n5. Click Save changes",
"Terraform": "```hcl\n# Enable access logging on the CloudTrail S3 bucket\nresource \"aws_s3_bucket\" \"<example_log_bucket_name>\" {\n bucket = \"<example_log_bucket_name>\"\n}\n\nresource \"aws_s3_bucket\" \"<example_bucket_name>\" {\n bucket = \"<example_bucket_name>\"\n}\n\nresource \"aws_s3_bucket_logging\" \"<example_resource_name>\" {\n bucket = aws_s3_bucket.<example_bucket_name>.id\n target_bucket = aws_s3_bucket.<example_log_bucket_name>.id # Critical: enables server access logging to the target bucket\n}\n```"
},
"Recommendation": {
"Text": "Ensure that S3 buckets have Logging enabled. CloudTrail data events can be used in place of S3 bucket logging. If that is the case, this finding can be considered a false positive.",
"Url": "https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html"
"Text": "Enable **S3 server access logging** on the CloudTrail logs bucket and write logs to a separate, tightly controlled bucket. Apply **least privilege**, enable **versioning**, and consider **Object Lock** to deter tampering. Centralize monitoring to support defense-in-depth and rapid investigation.",
"Url": "https://hub.prowler.com/check/cloudtrail_logs_s3_bucket_access_logging_enabled"
}
},
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],
@@ -1,37 +1,45 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_logs_s3_bucket_is_not_publicly_accessible",
"CheckTitle": "Ensure the S3 bucket CloudTrail logs is not publicly accessible",
"CheckTitle": "CloudTrail trail S3 bucket is not publicly accessible",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards",
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
"Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Industry and Regulatory Standards/CIS AWS Foundations Benchmark",
"Effects/Data Exposure"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "critical",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure the S3 bucket CloudTrail logs to is not publicly accessible",
"Risk": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected accounts use or configuration.",
"ResourceType": "AwsS3Bucket",
"Description": "CloudTrail log destination **S3 buckets** are inspected for ACL grants that expose data to the public `AllUsers` group.\n\nBuckets hosted in other accounts are flagged for out-of-scope review.",
"Risk": "Exposed CloudTrail logs erode **confidentiality** and **integrity**.\n\nAdversaries can harvest API activity to map accounts, roles, and keys, enabling **reconnaissance** and evasion. If write is allowed, logs can be **poisoned** or deleted, thwarting investigations and compromising incident timelines.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudTrail/cloudtrail-bucket-publicly-accessible.html",
"https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html",
"https://docs.aws.amazon.com/config/latest/developerguide/cloudtrail-s3-bucket-public-access-prohibited.html",
"https://docs.panther.com/alerts/alert-runbooks/built-in-policies/aws-cloudtrail-logs-s3-bucket-not-publicly-accessible"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "https://docs.prowler.com/checks/aws/logging-policies/logging_3#aws-console",
"Terraform": ""
"CLI": "aws s3api put-bucket-acl --bucket <example_resource_name> --acl private",
"NativeIaC": "```yaml\n# CloudFormation: ensure the CloudTrail S3 bucket ACL is not public\nResources:\n CloudTrailLogsBucket:\n Type: AWS::S3::Bucket\n Properties:\n BucketName: <example_resource_name>\n AccessControl: Private # CRITICAL: sets bucket ACL to private, removing any AllUsers (public) grants\n```",
"Other": "1. Open the AWS S3 Console\n2. Select the bucket used by CloudTrail\n3. Go to Permissions > Access control list (ACL)\n4. Click Edit under Public access, remove any grants to \"Everyone (public access)\" (uncheck Read/Write)\n5. Save changes",
"Terraform": "```hcl\n# Ensure the CloudTrail S3 bucket ACL is private\nresource \"aws_s3_bucket_acl\" \"fix_cloudtrail_logs_bucket\" {\n bucket = \"<example_resource_name>\"\n acl = \"private\" # CRITICAL: removes any public (AllUsers) ACL grants\n}\n```"
},
"Recommendation": {
"Text": "Analyze Bucket policy to validate appropriate permissions. Ensure the AllUsers principal is not granted privileges. Ensure the AuthenticatedUsers principal is not granted privileges.",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html"
"Text": "Apply **least privilege** to the log bucket:\n- Enable S3 `Block Public Access` (account and bucket)\n- Remove `AllUsers`/`AuthenticatedUsers` ACLs; avoid wildcard principals\n- Permit only CloudTrail and constrain with `aws:SourceArn`\n\nUse a dedicated private bucket and monitor for permission changes.",
"Url": "https://hub.prowler.com/check/cloudtrail_logs_s3_bucket_is_not_publicly_accessible"
}
},
"Categories": [
"forensics-ready",
"internet-exposed"
],
"DependsOn": [],
"DependsOn": [
"s3_bucket_public_access"
],
"RelatedTo": [],
"Notes": ""
}
@@ -1,33 +1,37 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_multi_region_enabled",
"CheckTitle": "Ensure CloudTrail is enabled in all regions",
"CheckTitle": "Region has at least one CloudTrail trail logging",
"CheckType": [
"Software and Configuration Checks",
"Industry and Regulatory Standards",
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure CloudTrail is enabled in all regions",
"Risk": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service.",
"Description": "**AWS CloudTrail** has at least one trail with `logging` enabled in every region. A **multi-region trail** or a regional trail counts for coverage in that region.",
"Risk": "Missing coverage in any region creates **visibility gaps**.\n\nAttackers can use lesser-monitored regions to run API actions, hide **unauthorized changes**, and exfiltrate data without audit trails, weakening **detective controls**, hindering **forensics**, and delaying response (confidentiality and integrity).",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrailconcepts.html#cloudtrail-concepts-management-events"
],
"Remediation": {
"Code": {
"CLI": "aws cloudtrail create-trail --name <trail_name> --bucket-name <s3_bucket_for_cloudtrail> --is-multi-region-trail aws cloudtrail update-trail --name <trail_name> --is-multi-region-trail ",
"NativeIaC": "https://docs.prowler.com/checks/aws/logging-policies/logging_1#cloudformation",
"Other": "https://docs.prowler.com/checks/aws/logging-policies/logging_1#aws-console",
"Terraform": "https://docs.prowler.com/checks/aws/logging-policies/logging_1#terraform"
"CLI": "",
"NativeIaC": "```yaml\n# CloudFormation: Create a multi-region CloudTrail and start logging\nResources:\n <example_resource_name>:\n Type: AWS::CloudTrail::Trail\n Properties:\n TrailName: <example_resource_name>\n S3BucketName: <example_resource_name>\n IsMultiRegionTrail: true # Critical: applies the trail to all regions\n IsLogging: true # Critical: ensures the trail is logging\n```",
"Other": "1. In the AWS Console, go to CloudTrail > Trails\n2. If no trail exists: Click Create trail, enter a name, choose an S3 bucket, set Apply trail to all regions = Yes, then Create (logging starts)\n3. If a trail exists: Select it, click Edit, set Apply trail to all regions = Yes, Save\n4. If Status shows Not logging, click Start logging",
"Terraform": "```hcl\n# Terraform: Multi-region CloudTrail with logging enabled\nresource \"aws_cloudtrail\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n s3_bucket_name = \"<example_resource_name>\"\n\n is_multi_region_trail = true # Critical: applies the trail to all regions\n enable_logging = true # Critical: ensures the trail is logging\n}\n```"
},
"Recommendation": {
"Text": "Ensure Logging is set to ON on all regions (even if they are not being used at the moment.",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrailconcepts.html#cloudtrail-concepts-management-events"
"Text": "Use a **multi-region CloudTrail trail** or per-region trails so `logging` is active in every region, including unused ones.\n\nCentralize logs, enforce **least privilege** to log stores, and add **defense-in-depth** with encryption, integrity validation, and retention. Continuously monitor trail health to catch gaps.",
"Url": "https://hub.prowler.com/check/cloudtrail_multi_region_enabled"
}
},
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],
@@ -1,31 +1,38 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_multi_region_enabled_logging_management_events",
"CheckTitle": "Ensure CloudTrail logging management events in All Regions",
"CheckTitle": "CloudTrail trail logs management events for read and write operations",
"CheckType": [
"CIS AWS Foundations Benchmark"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "low",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure CloudTrail logging management events in All Regions",
"Risk": "AWS CloudTrail enables governance, compliance, operational auditing, and risk auditing of your AWS account. To meet FTR requirements, you must have management events enabled for all AWS accounts and in all regions and aggregate these logs into an Amazon Simple Storage Service (Amazon S3) bucket owned by a separate AWS account.",
"RelatedUrl": "https://docs.prowler.com/checks/aws/logging-policies/logging_14",
"Description": "**CloudTrail trails** record **management events** (`read` and `write`) in every AWS region and are actively logging, using a multi-region trail or per-region coverage.",
"Risk": "Without region-wide management event logging, changes to identities, networking, and audit settings can go untracked.\n\nAdversaries can operate in overlooked regions to create resources, modify permissions, or disable logging, undermining **integrity**, **confidentiality**, and incident response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.prowler.com/checks/aws/logging-policies/logging_14#terraform",
"https://docs.prowler.com/checks/aws/logging-policies/logging_14"
],
"Remediation": {
"Code": {
"CLI": "aws cloudtrail update-trail --name <trail_name> --is-multi-region-trail",
"NativeIaC": "",
"Other": "https://docs.prowler.com/checks/aws/logging-policies/logging_14",
"Terraform": "https://docs.prowler.com/checks/aws/logging-policies/logging_14#terraform"
"CLI": "",
"NativeIaC": "```yaml\n# CloudFormation: enable multi-region and log management events (read & write)\nResources:\n <example_resource_name>:\n Type: AWS::CloudTrail::Trail\n Properties:\n S3BucketName: <example_resource_name>\n IsMultiRegionTrail: true # CRITICAL: apply the trail to all regions\n EventSelectors:\n - IncludeManagementEvents: true # CRITICAL: log management events\n ReadWriteType: All # CRITICAL: log both read and write\n```",
"Other": "1. In the AWS Console, go to CloudTrail > Trails and select your trail\n2. Click Edit\n3. Set Apply trail to all regions to Yes\n4. Under Management events, set Read/write events to All\n5. Click Save changes\n6. If Logging is Off, click Start logging",
"Terraform": "```hcl\n# Terraform: enable multi-region and log management events (read & write)\nresource \"aws_cloudtrail\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n s3_bucket_name = \"<example_resource_name>\"\n\n is_multi_region_trail = true # CRITICAL: apply the trail to all regions\n\n event_selector {\n include_management_events = true # CRITICAL: log management events\n read_write_type = \"All\" # CRITICAL: log both read & write\n }\n}\n```"
},
"Recommendation": {
"Text": "Enable CloudTrail logging management events in All Regions",
"Url": "https://docs.prowler.com/checks/aws/logging-policies/logging_14"
"Text": "Enable a **multi-region CloudTrail** that logs **management events** for `read` and `write` in all regions.\n\nCentralize logs in a separate, locked-down account; apply **least privilege**, encryption, retention, and integrity validation; and protect trails and storage with tamper-evident, deny-delete controls for **defense-in-depth**.",
"Url": "https://hub.prowler.com/check/cloudtrail_multi_region_enabled_logging_management_events"
}
},
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],
@@ -1,31 +1,41 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_s3_dataevents_read_enabled",
"CheckTitle": "Check if S3 buckets have Object-level logging for read events is enabled in CloudTrail.",
"CheckTitle": "CloudTrail trail records S3 object-level read events for all S3 buckets",
"CheckType": [
"Logging and Monitoring"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "low",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure that all your AWS CloudTrail trails are configured to log Data events in order to record S3 object-level API operations, such as GetObject, DeleteObject and PutObject, for individual S3 buckets or for all current and future S3 buckets provisioned in your AWS account.",
"Risk": "If logs are not enabled, monitoring of service use and threat analysis is not possible.",
"Description": "**CloudTrail trails** log **S3 object-level read data events** for all buckets, capturing object access (for example `GetObject`) via selectors targeting `AWS::S3::Object`",
"Risk": "Without **object-level read logging**, S3 access is opaque. Attackers or insiders can exfiltrate data via `GetObject` without audit trails, eroding **confidentiality** and hindering **forensics**, anomaly detection, and incident response.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://awswala.medium.com/enable-cloudtrail-data-events-logging-for-objects-in-an-s3-bucket-33cade51ae2b",
"https://docs.aws.amazon.com/securityhub/latest/userguide/s3-controls.html#s3-23",
"https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html",
"https://www.plerion.com/cloud-knowledge-base/ensure-object-level-logging-for-read-events-enabled-for-s3-bucket"
],
"Remediation": {
"Code": {
"CLI": "aws cloudtrail put-event-selectors --trail-name <YOUR_TRAIL_NAME_HERE> --event-selectors '[{ 'ReadWriteType': 'ReadOnly', 'IncludeManagementEvents':true, 'DataResources': [{ 'Type': 'AWS::S3::Object', 'Values': ['arn:aws:s3'] }] }]'",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/s3-controls.html#s3-23",
"Terraform": ""
"CLI": "aws cloudtrail put-event-selectors --trail-name <example_resource_name> --event-selectors '[{\"ReadWriteType\":\"ReadOnly\",\"DataResources\":[{\"Type\":\"AWS::S3::Object\",\"Values\":[\"arn:aws:s3\"]}]}]'",
"NativeIaC": "```yaml\n# CloudFormation: enable S3 object-level READ data events for all buckets on a trail\nResources:\n <example_resource_name>:\n Type: AWS::CloudTrail::Trail\n Properties:\n S3BucketName: <example_resource_name>\n EventSelectors:\n - ReadWriteType: ReadOnly # CRITICAL: log read-only data events\n DataResources:\n - Type: AWS::S3::Object # CRITICAL: target S3 object-level events\n Values:\n - arn:aws:s3 # CRITICAL: applies to all S3 buckets/objects\n```",
"Other": "1. In the AWS Console, open CloudTrail and select Trails\n2. Open your trail and go to the Data events section\n3. Add data event for S3 and choose All current and future S3 buckets\n4. Select only Read events (or All if Read-only is unavailable)\n5. Save changes",
"Terraform": "```hcl\n# Terraform: enable S3 object-level READ data events for all buckets on a trail\nresource \"aws_cloudtrail\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n s3_bucket_name = \"<example_resource_name>\"\n\n event_selector {\n read_write_type = \"ReadOnly\" # CRITICAL: log read-only data events\n data_resource {\n type = \"AWS::S3::Object\" # CRITICAL: target S3 object-level events\n values = [\"arn:aws:s3\"] # CRITICAL: apply to all S3 buckets/objects\n }\n }\n}\n```"
},
"Recommendation": {
"Text": "Enable logs. Create an S3 lifecycle policy. Define use cases, metrics and automated responses where applicable.",
"Url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html"
"Text": "Enable CloudTrail **data events** for S3 objects with `ReadOnly` (or `All`) across all current and future buckets. Use a multi-Region trail, centralize logs in an encrypted bucket with lifecycle retention, and integrate monitoring/alerts to support **defense in depth** and accountable access.",
"Url": "https://hub.prowler.com/check/cloudtrail_s3_dataevents_read_enabled"
}
},
"Categories": [],
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,31 +1,41 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_s3_dataevents_write_enabled",
"CheckTitle": "Check if S3 buckets have Object-level logging for write events is enabled in CloudTrail.",
"CheckTitle": "CloudTrail trail records all S3 object-level API operations for all buckets",
"CheckType": [
"Logging and Monitoring"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "low",
"ResourceType": "AwsCloudTrailTrail",
"Description": "Ensure that all your AWS CloudTrail trails are configured to log Data events in order to record S3 object-level API operations, such as GetObject, DeleteObject and PutObject, for individual S3 buckets or for all current and future S3 buckets provisioned in your AWS account.",
"Risk": "If logs are not enabled, monitoring of service use and threat analysis is not possible.",
"Description": "**CloudTrail trails** include **S3 object-level data events** for **write (or all) operations** across **all current and future buckets**, via classic or advanced selectors. This records actions like `PutObject`, `DeleteObject`, and multipart uploads at the object level.",
"Risk": "Without object-level write logging, unauthorized or accidental changes and deletions can go unobserved, undermining data **integrity** and **availability**. Forensics lose visibility into who modified or removed objects, hindering detection of ransomware, rogue automation, or insider tampering.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html",
"https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html",
"https://www.go2share.net/article/s3-bucket-logging",
"https://docs.amazonaws.cn/en_us/AmazonS3/latest/userguide/cloudtrail-logging-s3-info.html",
"https://docs.aws.amazon.com/securityhub/latest/userguide/s3-controls.html#s3-22"
],
"Remediation": {
"Code": {
"CLI": "aws cloudtrail put-event-selectors --trail-name <YOUR_TRAIL_NAME_HERE> --event-selectors '[{ 'ReadWriteType': 'WriteOnly', 'IncludeManagementEvents':true, 'DataResources': [{ 'Type': 'AWS::S3::Object', 'Values': ['arn:aws:s3'] }] }]'",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/s3-controls.html#s3-22",
"Terraform": ""
"CLI": "aws cloudtrail put-event-selectors --trail-name <example_resource_name> --event-selectors '[{\"ReadWriteType\":\"WriteOnly\",\"DataResources\":[{\"Type\":\"AWS::S3::Object\",\"Values\":[\"arn:aws:s3\"]}]}]'",
"NativeIaC": "```yaml\nResources:\n <example_resource_name>:\n Type: AWS::CloudTrail::Trail\n Properties:\n TrailName: <example_resource_name>\n S3BucketName: <example_resource_name>\n EventSelectors:\n - ReadWriteType: WriteOnly\n DataResources:\n - Type: AWS::S3::Object\n Values:\n - arn:aws:s3 # Critical: enables S3 object-level write data events for all buckets, fixing the check\n```",
"Other": "1. In the AWS Console, open CloudTrail and go to Trails\n2. Select <your trail> and click Edit under Data events\n3. For Data event source, choose S3\n4. Select All current and future S3 buckets\n5. Check Write events (or All events)\n6. Click Save changes",
"Terraform": "```hcl\nresource \"aws_cloudtrail\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n s3_bucket_name = \"<example_resource_name>\"\n\n event_selector {\n read_write_type = \"WriteOnly\"\n data_resource {\n type = \"AWS::S3::Object\"\n values = [\"arn:aws:s3\"] # Critical: logs S3 object-level write events for all buckets to pass the check\n }\n }\n}\n```"
},
"Recommendation": {
"Text": "Enable logs. Create an S3 lifecycle policy. Define use cases, metrics and automated responses where applicable.",
"Url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html"
"Text": "Enable **CloudTrail S3 data events** for object-level **write** (and *optionally* read) across all buckets on a multi-Region trail. Apply **least privilege** to log storage, set **lifecycle** retention, and integrate alerts. Use **advanced selectors** to target sensitive buckets/operations for cost control and **defense in depth**.",
"Url": "https://hub.prowler.com/check/cloudtrail_s3_dataevents_write_enabled"
}
},
"Categories": [
"logging",
"forensics-ready"
],
"DependsOn": [],
@@ -1,26 +1,37 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_threat_detection_enumeration",
"CheckTitle": "Ensure there are no potential enumeration threats in CloudTrail",
"CheckType": [],
"CheckTitle": "CloudTrail logs show no potential enumeration activity",
"CheckType": [
"TTPs/Discovery",
"Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis",
"Unusual Behaviors/User"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "critical",
"ResourceType": "AwsCloudTrailTrail",
"Description": "This check ensures that there are no potential enumeration threats in CloudTrail.",
"Risk": "Potential enumeration threats in CloudTrail can lead to unauthorized access to resources.",
"Description": "**CloudTrail activity** is analyzed for AWS identities executing a broad mix of discovery APIs like `List*`, `Describe*`, and `Get*` within a recent time window.\n\nAn identity exceeding a configurable ratio of these actions indicates potential enumeration behavior by that principal.",
"Risk": "Concentrated discovery activity signals **reconnaissance** with valid credentials. Adversaries can map assets and policies to enable **privilege escalation**, target data stores for **exfiltration** (confidentiality), and identify services to disrupt (availability), supporting stealthy lateral movement.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://medium.com/falconforce/falconfriday-detecting-enumeration-in-aws-0xff25-orangecon-25-edition-4aee83651088",
"https://www.elastic.co/guide/en/security/8.19/aws-discovery-api-calls-via-cli-from-a-single-resource.html",
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-logging-data-events",
"https://aws.plainenglish.io/aws-cloudtrail-event-cheatsheet-a-detection-engineers-guide-to-critical-api-calls-part-1-04fb1588556f",
"https://support.icompaas.com/support/solutions/articles/62000233455-ensure-there-are-no-potential-enumeration-threats-in-cloudtrail-"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"CLI": "aws iam update-access-key --user-name <USER_NAME> --access-key-id <ACCESS_KEY_ID> --status Inactive",
"NativeIaC": "```yaml\n# CloudFormation: deny common enumeration APIs for a specific IAM user\nResources:\n DenyEnumerationPolicy:\n Type: AWS::IAM::Policy\n Properties:\n PolicyName: deny-enumeration\n PolicyDocument:\n Version: \"2012-10-17\"\n Statement:\n - Effect: Deny # CRITICAL: blocks typical enumeration calls\n Action:\n - ec2:Describe* # CRITICAL: deny EC2 describe APIs\n - iam:List* # CRITICAL: deny IAM list APIs\n - s3:List* # CRITICAL: deny S3 list APIs\n - s3:Get* # CRITICAL: deny S3 get APIs (e.g., GetBucketAcl)\n Resource: \"*\"\n Users:\n - \"<example_resource_name>\" # CRITICAL: target the enumerating user\n```",
"Other": "1. In AWS Console, go to IAM > Users and open the user shown in the alert (ARN in the finding)\n2. Select the Security credentials tab\n3. For each active Access key, click Deactivate to set status to Inactive\n4. If the activity came from an EC2 instance role: go to EC2 > Instances > select the instance > Security > IAM role > Detach IAM role\n5. Re-run the check to confirm no new enumeration events occur",
"Terraform": "```hcl\n# Deny common enumeration APIs for a specific IAM user\nresource \"aws_iam_user_policy\" \"<example_resource_name>\" {\n name = \"deny-enumeration\"\n user = \"<example_user_name>\"\n\n policy = jsonencode({\n Version = \"2012-10-17\",\n Statement = [{\n Effect = \"Deny\", # CRITICAL: blocks typical enumeration calls\n Action = [\n \"ec2:Describe*\", # CRITICAL\n \"iam:List*\", # CRITICAL\n \"s3:List*\", # CRITICAL\n \"s3:Get*\" # CRITICAL\n ],\n Resource = \"*\"\n }]\n })\n}\n```"
},
"Recommendation": {
"Text": "To remediate this issue, ensure that there are no potential enumeration threats in CloudTrail.",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-logging-data-events"
"Text": "Apply **least privilege** to limit `List*`/`Describe*`/`Get*` to necessary resources and roles; use **separation of duties**.\n- Enforce MFA and short-lived sessions\n- Use **SCPs** to curb unnecessary discovery\n- Baseline expected reads and alert on spikes as **defense in depth**",
"Url": "https://hub.prowler.com/check/cloudtrail_threat_detection_enumeration"
}
},
"Categories": [
@@ -1,30 +1,43 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_threat_detection_llm_jacking",
"CheckTitle": "Ensure there are no potential LLM Jacking threats in CloudTrail.",
"CheckType": [],
"CheckTitle": "No potential LLM jacking activity detected in CloudTrail",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis",
"TTPs/Discovery",
"TTPs/Execution",
"TTPs/Defense Evasion",
"Effects/Resource Consumption",
"Unusual Behaviors/User"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "critical",
"ResourceType": "AwsCloudTrailTrail",
"Description": "This check ensures that there are no potential LLM Jacking threats in CloudTrail. LLM Jacking attacks involve unauthorized access to cloud-hosted large language model (LLM) services, such as AWS Bedrock, by exploiting exposed credentials or vulnerabilities. These attacks can lead to resource hijacking, unauthorized model invocations, and high operational costs for the victim organization.",
"Risk": "Potential LLM Jacking threats in CloudTrail can lead to unauthorized access to sensitive AI models, stolen credentials, resource hijacking, or running costly workloads. Attackers may use reverse proxies or malicious credentials to sell access to models, exfiltrate sensitive data, or disrupt business operations.",
"RelatedUrl": "https://sysdig.com/blog/llmjacking-stolen-cloud-credentials-used-in-new-ai-attack/",
"Description": "**CloudTrail Bedrock activity** is analyzed per identity for a high diversity of LLM-related API calls (e.g., `InvokeModel`, `InvokeModelWithResponseStream`, `GetFoundationModelAvailability`). *If an identity's share of these actions exceeds a configured threshold over a recent window*, it is surfaced as potential **LLM-jacking** behavior.",
"Risk": "Such patterns suggest **stolen credential** abuse to drive LLM usage.\n- Availability: cost exhaustion and service disruption\n- Confidentiality: leakage of prompts/outputs and model settings\n- Integrity: misuse of permissions for broader access\nAttackers may use reverse proxies to resell access and obfuscate sources.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://furkangungor.medium.com/automating-anomaly-detection-in-aws-cloudtrail-logs-4efb2ad9b958",
"https://help.sumologic.com/docs/integrations/amazon-aws/amazon-bedrock/",
"https://dzone.com/articles/ai-powered-aws-cloudtrail-analysis-strands-agent-bedrock"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"NativeIaC": "```yaml\n# CloudFormation SCP that blocks all Amazon Bedrock actions to stop LLM jacking\nResources:\n <example_resource_name>:\n Type: AWS::Organizations::Policy\n Properties:\n Name: <example_resource_name>\n Type: SERVICE_CONTROL_POLICY\n TargetIds:\n - \"<example_resource_id>\" # CRITICAL: Attach SCP to the root/OU/account to enforce the deny\n Content:\n Version: \"2012-10-17\"\n Statement:\n - Sid: DenyBedrock\n Effect: Deny\n Action: \"bedrock:*\" # CRITICAL: Denies all Bedrock APIs (Invoke/Converse/list/entitlements/etc.)\n Resource: \"*\" # CRITICAL: Apply deny to all resources\n```",
"Other": "1. In the AWS Console, go to Organizations > Policies > Service control policies\n2. Click Create policy\n3. Set Name to <example_resource_name>\n4. In Policy, paste a deny for Bedrock:\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\"Sid\":\"DenyBedrock\",\"Effect\":\"Deny\",\"Action\":\"bedrock:*\",\"Resource\":\"*\"}]\n }\n5. Save the policy and click Attach\n6. Select the target (Root, OU, or the affected account ID <example_resource_id>) and attach the policy\n7. Wait for propagation; no further Bedrock calls will occur, and the finding will clear after the detection window elapses",
"Terraform": "```hcl\n# SCP denying all Amazon Bedrock actions; attach it to the root/OU/account to halt LLM jacking\nresource \"aws_organizations_policy\" \"main\" {\n name = \"<example_resource_name>\"\n type = \"SERVICE_CONTROL_POLICY\"\n\n content = jsonencode({\n Version = \"2012-10-17\"\n Statement = [{\n Sid = \"DenyBedrock\"\n Effect = \"Deny\"\n Action = \"bedrock:*\" // CRITICAL: blocks all Bedrock APIs (prevents further suspicious activity)\n Resource = \"*\" // CRITICAL: deny across all resources\n }]\n })\n}\n\nresource \"aws_organizations_policy_attachment\" \"attach\" {\n policy_id = aws_organizations_policy.main.id\n target_id = \"<example_resource_id>\" // CRITICAL: attach to the affected account/OU/root to enforce the deny\n}\n```"
},
"Recommendation": {
"Text": "To remediate this issue, enable detailed CloudTrail logging for Bedrock API calls, monitor suspicious activities, and secure sensitive credentials. Enable logging of model invocation inputs and outputs, and restrict access using IAM policies. Review CloudTrail logs regularly for suspicious `InvokeModel` actions or unauthorized access to models.",
"Url": "https://permiso.io/blog/exploiting-hosted-models"
"Text": "Apply **least privilege** to Bedrock; restrict `Invoke*` only to required roles and deny broadly via **SCPs** where unused. Enforce **MFA** and short-lived creds; rotate/remove exposed keys. Enable **model invocation logging** and budgets/quotas. Continuously monitor for Bedrock enumeration plus invoke bursts. Use **defense in depth** across identities and networks.",
"Url": "https://hub.prowler.com/check/cloudtrail_threat_detection_llm_jacking"
}
},
"Categories": [
"threat-detection"
"threat-detection",
"gen-ai"
],
"DependsOn": [],
"RelatedTo": [],
@@ -1,26 +1,34 @@
{
"Provider": "aws",
"CheckID": "cloudtrail_threat_detection_privilege_escalation",
"CheckTitle": "Ensure there are no potential privilege escalation threats in CloudTrail",
"CheckType": [],
"CheckTitle": "No potential privilege escalation activity detected in CloudTrail",
"CheckType": [
"TTPs/Privilege Escalation",
"Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis"
],
"ServiceName": "cloudtrail",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"ResourceIdTemplate": "",
"Severity": "critical",
"ResourceType": "AwsCloudTrailTrail",
"Description": "This check ensures that there are no potential privilege escalation threats in CloudTrail.",
"Risk": "Potential privilege escalation threats in CloudTrail can lead to unauthorized access to resources.",
"Description": "**CloudTrail** activity is analyzed for **identities** executing high-risk actions linked to **privilege escalation** (e.g., `Attach*Policy`, `PassRole`, `AssumeRole`, `CreateAccessKey`). Identities exceeding a configurable share of such events within a *recent time window* are highlighted for investigation.",
"Risk": "Escalation patterns can grant elevated entitlements, enabling:\n- Confidentiality loss via unauthorized data/secret access\n- Integrity compromise by changing IAM policies/roles\n- Availability impact by tampering with logging or resources\nThis also facilitates lateral movement and persistence.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://rhinosecuritylabs.com/aws/aws-privilege-escalation-methods-mitigation/",
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-logging-data-events",
"https://signmycode.com/blog/what-is-privilege-escalation-in-aws-recommendations-to-prevent-it"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "",
"Terraform": ""
"NativeIaC": "```yaml\n# CloudFormation: Organization SCP to block common IAM privilege-escalation actions\nResources:\n <example_resource_name>:\n Type: AWS::Organizations::Policy\n Properties:\n Name: deny-iam-privesc\n Type: SERVICE_CONTROL_POLICY\n # Critical: This SCP denies risky IAM actions often used for privilege escalation\n # Explanation: Denying these actions organization-wide prevents future privesc activity detected by CloudTrail\n Content: |\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Deny\",\n \"Action\": [\n \"iam:AttachUserPolicy\",\n \"iam:AttachRolePolicy\",\n \"iam:PutUserPolicy\",\n \"iam:PutRolePolicy\",\n \"iam:PutGroupPolicy\",\n \"iam:AddUserToGroup\",\n \"iam:CreateAccessKey\",\n \"iam:CreateLoginProfile\",\n \"iam:UpdateLoginProfile\",\n \"iam:UpdateAssumeRolePolicy\",\n \"iam:CreatePolicyVersion\",\n \"iam:SetDefaultPolicyVersion\",\n \"iam:PassRole\"\n ],\n \"Resource\": \"*\"\n }\n ]\n }\n <example_resource_name>Attachment:\n Type: AWS::Organizations::PolicyAttachment\n Properties:\n # Critical: Attach the SCP so it is enforced\n PolicyId: !Ref <example_resource_name>\n TargetId: <example_resource_id> # OU, Root, or Account ID\n```",
"Other": "1. In AWS Console, open IAM and identify the AWS identity shown in the Prowler finding (user or role ARN)\n2. If it is an IAM user:\n - Go to Security credentials > Access keys, set active keys to Inactive\n - Go to Permissions, detach all managed policies and delete inline policies\n - Go to Groups, remove the user from privileged groups\n - Go to Console password, delete the login profile\n3. If it is an IAM role:\n - Go to Permissions, detach managed policies and delete inline policies\n - Go to Trust relationships, remove principals that should not assume the role and save\n4. Re-run the scan after the detection window elapses to confirm no further privilege-escalation activity is detected",
"Terraform": "```hcl\n# SCP to block common IAM privilege-escalation actions\nresource \"aws_organizations_policy\" \"<example_resource_name>\" {\n name = \"deny-iam-privesc\"\n type = \"SERVICE_CONTROL_POLICY\"\n\n # Critical: Deny risky IAM actions to prevent future privesc\n # Explanation: Blocks escalation techniques commonly seen in CloudTrail\n content = jsonencode({\n Version = \"2012-10-17\",\n Statement = [\n {\n Effect = \"Deny\",\n Action = [\n \"iam:AttachUserPolicy\",\n \"iam:AttachRolePolicy\",\n \"iam:PutUserPolicy\",\n \"iam:PutRolePolicy\",\n \"iam:PutGroupPolicy\",\n \"iam:AddUserToGroup\",\n \"iam:CreateAccessKey\",\n \"iam:CreateLoginProfile\",\n \"iam:UpdateLoginProfile\",\n \"iam:UpdateAssumeRolePolicy\",\n \"iam:CreatePolicyVersion\",\n \"iam:SetDefaultPolicyVersion\",\n \"iam:PassRole\"\n ],\n Resource = \"*\"\n }\n ]\n })\n}\n\nresource \"aws_organizations_policy_attachment\" \"<example_resource_name>_attach\" {\n # Critical: Attach the SCP so it takes effect\n policy_id = aws_organizations_policy.<example_resource_name>.id\n target_id = \"<example_resource_id>\" # OU, Root, or Account ID\n}\n```"
},
"Recommendation": {
"Text": "To remediate this issue, ensure that there are no potential privilege escalation threats in CloudTrail.",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-logging-data-events"
"Text": "Apply **least privilege** and **defense in depth**:\n- Restrict `PassRole`, `Attach*Policy`, `UpdateAssumeRolePolicy`, `CreateAccessKey`\n- Enforce permission boundaries and SCPs\n- Require MFA and change approvals\n- Use multi-Region CloudTrail, immutable retention, and alerting on anomalous sequences",
"Url": "https://hub.prowler.com/check/cloudtrail_threat_detection_privilege_escalation"
}
},
"Categories": [
@@ -1,28 +1,34 @@
{
"Provider": "aws",
"CheckID": "dlm_ebs_snapshot_lifecycle_policy_exists",
"CheckTitle": "Ensure EBS Snapshot lifecycle policies are defined.",
"CheckTitle": "Region with EBS snapshots has at least one EBS snapshot lifecycle policy defined",
"CheckType": [
"Data Protection"
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"ServiceName": "dlm",
"SubServiceName": "ebs",
"ResourceIdTemplate": "arn:aws:iam::account-id:resource-id",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Other",
"Description": "Ensure EBS Snapshot lifecycle policies are defined.",
"Risk": "With AWS DLM service, you can manage the lifecycle of your EBS volume snapshots. By automating the EBS volume backup management using lifecycle policies, you can protect your EBS data by enforcing a regular backup schedule, retain backups as required by auditors or internal compliance.",
"RelatedUrl": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshot-lifecycle.html#dlm-elements",
"Description": "**EBS snapshots** are expected to be governed by **Data Lifecycle Manager (DLM) policies** in each Region where snapshots exist.\n\nThe evaluation looks for lifecycle policies that automate snapshot creation, retention, and cleanup for those snapshots.",
"Risk": "Without **automated lifecycle policies**, backups become inconsistent and error-prone, reducing availability and weakening recovery objectives. Missing retention rules cause premature deletion or snapshot sprawl, increasing cost and exposing stale data. Lack of cross-Region/account copies limits resilience to regional outages and malicious deletion.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/DLM/ebs-snapshot-automation.html",
"https://repost.aws/articles/ARmYgZmA8MRQi89pWd9D7eFw/how-to-create-a-automate-backup-aws-data-lifecycle-management-using-snapshots",
"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshot-lifecycle.html#dlm-elements"
],
"Remediation": {
"Code": {
"CLI": "aws dlm create-lifecycle-policy --region <region> --execution-role-arn <execution-role-arn> --description <description> --state ENABLED --policy-details file://lifecycle-policy-config.json",
"NativeIaC": "",
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/DLM/ebs-snapshot-automation.html",
"Terraform": ""
"CLI": "aws dlm create-lifecycle-policy --region <region> --execution-role-arn <execution-role-arn> --description \"<description>\" --state ENABLED --policy-details '{\"PolicyType\":\"EBS_SNAPSHOT_MANAGEMENT\",\"ResourceTypes\":[\"VOLUME\"],\"TargetTags\":[{\"Key\":\"<tag_key>\",\"Value\":\"<tag_value>\"}],\"Schedules\":[{\"CreateRule\":{\"Interval\":24,\"IntervalUnit\":\"HOURS\"},\"RetainRule\":{\"Count\":1}}]}'",
"NativeIaC": "```yaml\n# CloudFormation: minimal EBS snapshot lifecycle policy\nResources:\n <example_resource_name>:\n Type: AWS::DLM::LifecyclePolicy\n Properties:\n Description: \"<description>\"\n ExecutionRoleArn: \"<example_resource_arn>\"\n State: ENABLED # Critical: enables the policy so it is counted by the check\n PolicyDetails:\n PolicyType: EBS_SNAPSHOT_MANAGEMENT # Critical: creates an EBS snapshot lifecycle policy\n ResourceTypes: [VOLUME]\n TargetTags:\n - Key: \"<tag_key>\" # Critical: selects target volumes by tag\n Value: \"<tag_value>\"\n Schedules:\n - CreateRule:\n Interval: 24\n IntervalUnit: HOURS\n RetainRule:\n Count: 1\n```",
"Other": "1. In the AWS console, switch to the Region that has EBS snapshots\n2. Open EC2 > Lifecycle Manager (DLM) > Create lifecycle policy\n3. Select EBS snapshot policy; Target resource: Volumes\n4. Add Target tags: Key = <tag_key>, Value = <tag_value>\n5. Set Schedule: Create every 24 hours; Retain 1 snapshot\n6. Ensure State is Enabled and click Create policy",
"Terraform": "```hcl\n# Terraform: minimal EBS snapshot lifecycle policy\nresource \"aws_dlm_lifecycle_policy\" \"<example_resource_name>\" {\n description = \"<description>\"\n execution_role_arn = \"<example_resource_arn>\"\n state = \"ENABLED\" # Critical: enables the policy so it is counted by the check\n\n policy_details {\n policy_type = \"EBS_SNAPSHOT_MANAGEMENT\" # Critical: creates an EBS snapshot lifecycle policy\n resource_types = [\"VOLUME\"]\n target_tags = {\n \"<tag_key>\" = \"<tag_value>\" # Critical: selects target volumes by tag\n }\n schedule {\n create_rule {\n interval = 24\n interval_unit = \"HOURS\"\n }\n retain_rule {\n count = 1\n }\n }\n }\n}\n```"
},
"Recommendation": {
"Text": "To use Amazon Data Lifecycle Manager (DLM) service to manage the lifecycle of your EBS volume snapshots, you have to tag your AWS EBS volumes and create data lifecycle policies via Amazon DLM.",
"Url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshot-lifecycle.html#dlm-elements"
"Text": "Implement **DLM lifecycle policies** for all volumes that require backup.\n\n- Schedule creations to meet RPO/RTO\n- Define retention to prevent sprawl and enforce least data exposure\n- Use **least privilege** roles and separation of duties\n- Copy snapshots to another Region/account for **defense in depth**\n- Monitor policy health and coverage with tags",
"Url": "https://hub.prowler.com/check/dlm_ebs_snapshot_lifecycle_policy_exists"
}
},
"Categories": [
@@ -1,31 +1,38 @@
{
"Provider": "aws",
"CheckID": "dms_endpoint_mongodb_authentication_enabled",
"CheckTitle": "Check if DMS endpoints for MongoDB have an authentication mechanism enabled.",
"CheckTitle": "DMS MongoDB endpoint has an authentication mechanism enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure"
],
"ServiceName": "dms",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:dms:region:account-id:endpoint/endpoint-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsDmsEndpoint",
"Description": "This control checks whether an AWS DMS endpoint for MongoDB is configured with an authentication mechanism. The control fails if an authentication type isn't set for the endpoint.",
"Risk": "Without an authentication mechanism enabled, unauthorized users may gain access to sensitive data during migration, increasing the risk of data breaches and security incidents.",
"RelatedUrl": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html",
"Description": "**AWS DMS MongoDB endpoints** use an authentication mechanism. Configuration expects `AuthType` not `no` (e.g., `password`) with an `authMechanism` such as `scram_sha_1` or `mongodb_cr`.",
"Risk": "Without authentication, unauthenticated connections can access the source, degrading **confidentiality** and **integrity**. Adversaries could read or modify migrated documents, hijack CDC, inject data, or exfiltrate records during replication.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html",
"https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-11"
],
"Remediation": {
"Code": {
"CLI": "aws dms modify-endpoint --endpoint-arn <endpoint-arn> --username <username> --password <password> --authentication-type <authentication-type>",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-11",
"Terraform": ""
"CLI": "aws dms modify-endpoint --endpoint-arn <endpoint-arn> --mongodb-settings '{\"AuthType\":\"password\"}' --username <username> --password <password>",
"NativeIaC": "```yaml\n# CloudFormation: enable authentication on a MongoDB DMS endpoint\nResources:\n <example_resource_name>:\n Type: AWS::DMS::Endpoint\n Properties:\n EndpointIdentifier: <example_resource_name>\n EndpointType: source\n EngineName: mongodb\n MongoDbSettings:\n AuthType: password # CRITICAL: sets authentication mode to 'password' so auth is enabled\n```",
"Other": "1. In the AWS Console, go to Database Migration Service > Endpoints\n2. Select the MongoDB endpoint and click Modify\n3. Under MongoDB settings, set Authentication mode to Password\n4. Enter Username and Password\n5. Click Save changes",
"Terraform": "```hcl\n# Terraform: enable authentication on a MongoDB DMS endpoint\nresource \"aws_dms_endpoint\" \"<example_resource_name>\" {\n endpoint_id = \"<example_resource_name>\"\n endpoint_type = \"source\"\n engine_name = \"mongodb\"\n\n mongodb_settings {\n auth_type = \"password\" # CRITICAL: enables authentication for the MongoDB endpoint\n }\n}\n```"
},
"Recommendation": {
"Text": "Enable an authentication mechanism on DMS endpoints for MongoDB to ensure secure access control during migration.",
"Url": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html"
"Text": "Enforce **strong authentication** on MongoDB endpoints: set `AuthType` to `password` and use `authMechanism` like `scram_sha_1`. Apply **least privilege** database accounts, store secrets in **Secrets Manager**, and pair with **TLS** for defense in depth.",
"Url": "https://hub.prowler.com/check/dms_endpoint_mongodb_authentication_enabled"
}
},
"Categories": [],
"Categories": [
"identity-access"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,31 +1,38 @@
{
"Provider": "aws",
"CheckID": "dms_endpoint_neptune_iam_authorization_enabled",
"CheckTitle": "Check if DMS endpoints for Neptune databases have IAM authorization enabled.",
"CheckTitle": "DMS endpoint for Neptune has IAM authorization enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure"
],
"ServiceName": "dms",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:dms:region:account-id:endpoint/endpoint-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsDmsEndpoint",
"Description": "This control checks whether an AWS DMS endpoint for an Amazon Neptune database is configured with IAM authorization. The control fails if the DMS endpoint doesn't have IAM authorization enabled.",
"Risk": "Without IAM authorization, DMS endpoints for Neptune databases may lack granular access control, increasing the risk of unauthorized access to sensitive data.",
"RelatedUrl": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html",
"Description": "**DMS Neptune endpoints** have **IAM authorization** enabled via the endpoint setting `IamAuthEnabled`.",
"Risk": "Without **IAM authorization**, migration components can interact with Neptune using broad trust, enabling unauthorized data loads, reads, or alterations.\n\nThis degrades **confidentiality** and **integrity** and increases the chance of privilege abuse and data exfiltration.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html",
"https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-10"
],
"Remediation": {
"Code": {
"CLI": "aws dms modify-endpoint --endpoint-arn <endpoint-arn> --service-access-role-arn <iam-role-arn>",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-10",
"CLI": "aws dms modify-endpoint --endpoint-arn <endpoint-arn> --neptune-settings '{\"IamAuthEnabled\":true}'",
"NativeIaC": "```yaml\n# CloudFormation: Enable IAM authorization on a DMS Neptune endpoint\nResources:\n <example_resource_name>:\n Type: AWS::DMS::Endpoint\n Properties:\n EndpointType: target\n EngineName: neptune\n NeptuneSettings:\n ServiceAccessRoleArn: <example_resource_arn>\n S3BucketName: <example_resource_name>\n S3BucketFolder: <example_resource_name>\n IamAuthEnabled: true # Critical: enables IAM authorization for the Neptune endpoint\n```",
"Other": "1. In the AWS Console, go to Database Migration Service > Endpoints\n2. Select the Neptune endpoint and click Modify\n3. Expand Endpoint settings (Neptune settings) and set IAM authorization to Enabled\n4. Ensure Service access role ARN is set, then click Save",
"Terraform": ""
},
"Recommendation": {
"Text": "Enable IAM authorization on DMS endpoints for Neptune databases by specifying a service role in the ServiceAccessRoleARN parameter.",
"Url": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html"
"Text": "Enable **IAM authorization** on Neptune endpoints (`IamAuthEnabled=true`) and use a **least privilege** service role limited to minimal Neptune and S3 permissions.\n\nApply **defense in depth**: restrict network paths, separate duties for migration roles, and monitor access with logs and alerts.",
"Url": "https://hub.prowler.com/check/dms_endpoint_neptune_iam_authorization_enabled"
}
},
"Categories": [],
"Categories": [
"identity-access"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,31 +1,41 @@
{
"Provider": "aws",
"CheckID": "dms_endpoint_redis_in_transit_encryption_enabled",
"CheckTitle": "Check if DMS endpoints for Redis OSS are encrypted in transit.",
"CheckTitle": "DMS endpoint for Redis OSS is encrypted in transit",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices/Encryption in Transit",
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls (USA)",
"Software and Configuration Checks/Industry and Regulatory Standards/PCI-DSS",
"Software and Configuration Checks/Industry and Regulatory Standards/ISO 27001 Controls"
],
"ServiceName": "dms",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:dms:region:account-id:endpoint/endpoint-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsDmsEndpoint",
"Description": "This control checks whether an AWS DMS endpoint for Redis OSS is configured with a TLS connection. The control fails if the endpoint doesn't have TLS enabled.",
"Risk": "Without TLS, data transmitted between databases may be vulnerable to interception or eavesdropping, increasing the risk of data breaches and other security incidents.",
"RelatedUrl": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Redis.html",
"Description": "**DMS Redis OSS endpoints** are assessed for the presence of **TLS** in their endpoint settings, such as `ssl-encryption`, indicating encrypted connections between the DMS replication instance and Redis.",
"Risk": "Without **TLS**, traffic between DMS and Redis can be intercepted or altered, compromising **confidentiality** and **integrity**.\n\nAttackers can perform **man-in-the-middle** interception, steal auth tokens, and inject or corrupt migrated data.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-12",
"https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Redis.html#CHAP_Target.Redis.EndpointSettings",
"https://support.icompaas.com/support/solutions/articles/62000233450-ensure-encryption-in-transit-for-dms-endpoints-for-redis-oss"
],
"Remediation": {
"Code": {
"CLI": "aws dms modify-endpoint --endpoint-arn <endpoint-arn> --redis-settings '{'SslSecurityProtocol': 'ssl-encryption'}'",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-12",
"Terraform": ""
"CLI": "",
"NativeIaC": "```yaml\n# CloudFormation: Enable TLS for Redis OSS DMS endpoint\nResources:\n <example_resource_name>:\n Type: AWS::DMS::Endpoint\n Properties:\n EndpointIdentifier: <example_resource_name>\n EndpointType: target\n EngineName: redis\n RedisSettings:\n ServerName: <example_resource_name>\n Port: 6379\n AuthType: none\n SslSecurityProtocol: ssl-encryption # Critical: enables TLS for in-transit encryption\n```",
"Other": "1. In the AWS Console, go to Database Migration Service > Endpoints\n2. Select the Redis OSS endpoint and click Modify\n3. Set SSL security protocol (Encryption in transit) to \"SSL encryption\"\n4. Save changes",
"Terraform": "```hcl\n# Enable TLS for Redis OSS DMS endpoint\nresource \"aws_dms_endpoint\" \"<example_resource_name>\" {\n endpoint_id = \"<example_resource_id>\"\n endpoint_type = \"target\"\n engine_name = \"redis\"\n\n redis_settings {\n server_name = \"<example_resource_name>\"\n port = 6379\n auth_type = \"none\"\n ssl_security_protocol = \"ssl-encryption\" # Critical: enables TLS for in-transit encryption\n }\n}\n```"
},
"Recommendation": {
"Text": "Enable TLS for DMS endpoints for Redis OSS to ensure encrypted communication during data migration.",
"Url": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Redis.html#CHAP_Target.Redis.EndpointSettings"
"Text": "Enable **TLS** on Redis OSS endpoints (e.g., `ssl-encryption`) and require server certificate validation. Prohibit plaintext connections, prefer private networking, and enforce **least privilege** for DMS roles to strengthen **defense in depth**.",
"Url": "https://hub.prowler.com/check/dms_endpoint_redis_in_transit_encryption_enabled"
}
},
"Categories": [],
"Categories": [
"encryption"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,32 +1,40 @@
{
"Provider": "aws",
"CheckID": "dms_endpoint_ssl_enabled",
"CheckTitle": "Ensure SSL mode is enabled in DMS endpoint",
"CheckType": ["Effects", "Data Exposure"],
"CheckTitle": "DMS endpoint has SSL enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure"
],
"ServiceName": "dms",
"SubServiceName": "endpoint",
"ResourceIdTemplate": "arn:partition:dms:region:account-id:endpoint:resource-id",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "high",
"ResourceType": "AwsDmsEndpoint",
"Description": "This check ensures that SSL mode is enabled for all AWS Database Migration Service (DMS) endpoints. Enabling SSL provides encryption in transit for data transferred through these endpoints.",
"Risk": "Without SSL enabled, data transferred through DMS endpoints is not encrypted, potentially exposing sensitive information to unauthorized access or interception during transit.",
"RelatedUrl": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.SSL.html",
"Description": "**AWS DMS endpoints** have their SSL/TLS mode inspected; any value other than `none` denotes encrypted connections between the replication instance and databases.\n\nSupported modes include `require`, `verify-ca`, and `verify-full`.",
"Risk": "Without TLS, data in transit can be read or altered, affecting:\n- **Confidentiality** via packet sniffing and credential leakage\n- **Integrity** through **MITM** tampering of migration streams\n- **Availability** from session hijack or task disruption",
"RelatedUrl": "",
"AdditionalURLs": [
"https://aws.amazon.com/blogs/database/configuring-ssl-encryption-on-oracle-and-postgresql-endpoints-in-aws-dms/",
"https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.SSL.html",
"https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-9"
],
"Remediation": {
"Code": {
"CLI": "aws dms modify-endpoint --endpoint-arn <endpoint_arn> --ssl-mode require",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-9",
"Terraform": ""
},
"Recommendation": {
"Text": "Enable SSL mode for all DMS endpoints. Use 'require' as the minimum SSL mode, and consider using 'verify-ca' or 'verify-full' for higher security.",
"Url": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.SSL.html"
}
"Code": {
"CLI": "aws dms modify-endpoint --endpoint-arn <endpoint-arn> --ssl-mode require",
"NativeIaC": "```yaml\n# CloudFormation: Set SSL on a DMS endpoint\nResources:\n <example_resource_name>:\n Type: AWS::DMS::Endpoint\n Properties:\n EndpointIdentifier: <example_resource_name>\n EndpointType: source\n EngineName: sqlserver\n ServerName: <server_name>\n Port: 1433\n Username: <username>\n Password: <password>\n SslMode: require # CRITICAL: enables SSL (not \"none\"), fixing the finding\n```",
"Other": "1. In the AWS DMS console, go to Endpoints\n2. Select the non-compliant endpoint and choose Modify\n3. Set SSL mode to Require (or Verify-ca/Verify-full if required by your engine and certificate is available)\n4. If Verify-ca/Verify-full is selected, choose the appropriate CA certificate\n5. Save changes, then Test connection to confirm",
"Terraform": "```hcl\n# Terraform: Set SSL on a DMS endpoint\nresource \"aws_dms_endpoint\" \"<example_resource_name>\" {\n endpoint_id = \"<example_resource_name>\"\n endpoint_type = \"source\"\n engine_name = \"sqlserver\"\n server_name = \"<server_name>\"\n port = 1433\n username = \"<username>\"\n password = \"<password>\"\n\n ssl_mode = \"require\" # CRITICAL: enables SSL (not \"none\"), fixing the finding\n}\n```"
},
"Recommendation": {
"Text": "Configure endpoints to use SSL/TLS at least `require`; prefer `verify-ca` or `verify-full` where supported. Manage trusted CA material and rotate regularly. Apply **defense in depth** with private connectivity and strict IAM, and enforce this posture via policy-as-code and continuous validation.",
"Url": "https://hub.prowler.com/check/dms_endpoint_ssl_enabled"
}
},
"Categories": [
"encryption"
"encryption"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
}
}
@@ -1,29 +1,39 @@
{
"Provider": "aws",
"CheckID": "dms_instance_minor_version_upgrade_enabled",
"CheckTitle": "Ensure DMS instances have auto minor version upgrade enabled.",
"CheckType": [],
"CheckTitle": "DMS replication instance has auto minor version upgrade enabled",
"CheckType": [
"Software and Configuration Checks/Patch Management",
"Software and Configuration Checks/AWS Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
],
"ServiceName": "dms",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rdmsds:region:account-id:rep",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsDmsReplicationInstance",
"Description": "Ensure DMS instances have auto minor version upgrade enabled.",
"Risk": "Ensure that your Amazon Database Migration Service (DMS) replication instances have the Auto Minor Version Upgrade feature enabled in order to receive automatically minor engine upgrades.",
"RelatedUrl": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-6",
"Description": "**AWS DMS replication instances** are evaluated for the `auto_minor_version_upgrade` setting to confirm **automatic minor engine updates** are enabled during the maintenance window.",
"Risk": "Without **automatic minor upgrades**, DMS engines can miss security patches and fixes, enabling exploitation of known flaws and instability.\n- Confidentiality: exposure via unpatched components\n- Integrity: replication errors or data drift\n- Availability: outages during migration or CDC",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-6",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/DMS/auto-minor-version-upgrade.html"
],
"Remediation": {
"Code": {
"CLI": "aws dms modify-replication-instance --region <REGION> --replication-instance-arn arn:aws:dms:<REGION>:<ACCOUNT_ID>:rep:<REPLICATION_ID> --auto-minor-version-upgrade --apply-immediately",
"NativeIaC": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/auto-minor-version-upgrade.html#",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/auto-minor-version-upgrade.html#",
"Terraform": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/auto-minor-version-upgrade.html#"
"NativeIaC": "```yaml\n# CloudFormation: Enable auto minor version upgrade on a DMS replication instance\nResources:\n <example_resource_name>:\n Type: AWS::DMS::ReplicationInstance\n Properties:\n ReplicationInstanceIdentifier: <example_resource_id>\n ReplicationInstanceClass: dms.t3.micro\n AutoMinorVersionUpgrade: true # CRITICAL: turns on automatic minor version upgrades\n```",
"Other": "1. Open the AWS Console and go to Database Migration Service (DMS)\n2. Click Replication instances and select your instance\n3. Choose Actions > Modify\n4. Check Auto minor version upgrade\n5. Select Apply immediately\n6. Click Modify to save",
"Terraform": "```hcl\n# Terraform: Enable auto minor version upgrade on a DMS replication instance\nresource \"aws_dms_replication_instance\" \"<example_resource_name>\" {\n replication_instance_id = \"<example_resource_id>\"\n replication_instance_class = \"dms.t3.micro\"\n auto_minor_version_upgrade = true # CRITICAL: turns on automatic minor version upgrades\n}\n```"
},
"Recommendation": {
"Text": "Enable auto minor version upgrade for all DMS replication instances.",
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-6"
"Text": "Enable `auto_minor_version_upgrade` on all replication instances to maintain **continuous patching**.\n- Set a maintenance window and validate in non-prod\n- Monitor release notes and health metrics\n- Enforce **least privilege** for change control\n- Keep **backups** for rollback",
"Url": "https://hub.prowler.com/check/dms_instance_minor_version_upgrade_enabled"
}
},
"Categories": [],
"Categories": [
"vulnerabilities"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,30 +1,37 @@
{
"Provider": "aws",
"CheckID": "dms_instance_multi_az_enabled",
"CheckTitle": "Ensure DMS instances have multi az enabled.",
"CheckType": [],
"CheckTitle": "DMS replication instance has Multi-AZ enabled",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Denial of Service"
],
"ServiceName": "dms",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rdmsds:region:account-id:rep",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsDmsReplicationInstance",
"Description": "Ensure DMS instances have multi az enabled.",
"Risk": "Ensure that your Amazon Database Migration Service (DMS) replication instances are using Multi-AZ deployment configurations to provide High Availability (HA) through automatic failover to standby replicas in the event of a failure such as an Availability Zone (AZ) outage, an internal hardware or network outage, a software failure or in case of a planned maintenance session.",
"RelatedUrl": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/multi-az.html#",
"Description": "**AWS DMS replication instances** are evaluated for **Multi-AZ** configuration. Instances with `multi_az` enabled are treated as having a cross-AZ standby; those without it are identified as single-AZ.",
"Risk": "Without **Multi-AZ**, a single-AZ failure or maintenance event can halt migrations, causing extended downtime (**availability**) and replication gaps or rollbacks (**integrity**). Tasks may stall, increase cutover risk, and require manual recovery when the replication instance is unavailable.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/DMS/multi-az.html"
],
"Remediation": {
"Code": {
"CLI": "aws dms modify-replication-instance --region <REGION> --replication-instance-arn arn:aws:dms:<REGION>:<ACCOUNT_ID>:rep:<REPLICATION_ID> --multi-az --apply-immediately",
"NativeIaC": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/multi-az.html#",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/multi-az.html#",
"Terraform": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/multi-az.html#"
"CLI": "aws dms modify-replication-instance --replication-instance-arn arn:aws:dms:<REGION>:<ACCOUNT_ID>:rep:<REPLICATION_ID> --multi-az --apply-immediately",
"NativeIaC": "```yaml\n# CloudFormation: enable Multi-AZ on a DMS replication instance\nResources:\n <example_resource_name>:\n Type: AWS::DMS::ReplicationInstance\n Properties:\n ReplicationInstanceClass: dms.t3.micro\n MultiAZ: true # Critical: enables Multi-AZ to pass the check\n```",
"Other": "1. Open the AWS DMS console\n2. Go to Replication instances and select your instance\n3. Click Modify\n4. Check Multi-AZ\n5. Check Apply immediately\n6. Click Modify to save",
"Terraform": "```hcl\n# Enable Multi-AZ on a DMS replication instance\nresource \"aws_dms_replication_instance\" \"<example_resource_name>\" {\n replication_instance_id = \"<example_resource_name>\"\n replication_instance_class = \"dms.t3.micro\"\n multi_az = true # Critical: enables Multi-AZ to pass the check\n}\n```"
},
"Recommendation": {
"Text": "Enable multi az for all DMS replication instances.",
"Url": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/multi-az.html#"
"Text": "Enable **Multi-AZ** (set `multi_az` to `true`) on DMS replication instances that handle production or time-sensitive migrations to ensure redundancy and automatic failover.\n\nApply HA principles: distribute across AZs, test failover, monitor health, and plan maintenance to minimize impact.",
"Url": "https://hub.prowler.com/check/dms_instance_multi_az_enabled"
}
},
"Categories": [
"redundancy"
"resilience"
],
"DependsOn": [],
"RelatedTo": [],
@@ -1,26 +1,37 @@
{
"Provider": "aws",
"CheckID": "dms_instance_no_public_access",
"CheckTitle": "Ensure DMS instances are not publicly accessible.",
"CheckType": [],
"CheckTitle": "DMS replication instance is not publicly exposed to the Internet",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark",
"TTPs/Initial Access"
],
"ServiceName": "dms",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:rdmsds:region:account-id:rep",
"ResourceIdTemplate": "",
"Severity": "critical",
"ResourceType": "AwsDmsReplicationInstance",
"Description": "Ensure DMS instances are not publicly accessible.",
"Risk": "Ensure that your Amazon Database Migration Service (DMS) are not publicly accessible from the Internet in order to avoid exposing private data and minimize security risks. A DMS replication instance should have a private IP address and the Publicly Accessible feature disabled when both the source and the target databases are in the same network that is connected to the instance's VPC through a VPN, VPC peering connection, or using an AWS Direct Connect dedicated connection.",
"RelatedUrl": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-1",
"Description": "**AWS DMS replication instances** are evaluated for **public exposure**. Exposure is identified when `PubliclyAccessible` is enabled and an attached security group allows inbound traffic from any address. Private or allowlisted instances are not considered exposed.",
"Risk": "Publicly reachable replication instances threaten:\n- Confidentiality: migration data and credentials can be intercepted or exfiltrated.\n- Integrity: attackers may alter tasks or inject records.\n- Availability: abuse or DDoS can stall replication and delay cutovers.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-1",
"https://docs.aws.amazon.com/amazonq/detector-library/terraform/restrict-public-access-dms-terraform/",
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/DMS/publicly-accessible.html",
"https://support.icompaas.com/support/solutions/articles/62000233448-ensure-dms-instances-are-not-publicly-accessible"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/publicly-accessible.html#",
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/publicly-accessible.html#",
"Terraform": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/DMS/publicly-accessible.html#"
"NativeIaC": "```yaml\n# CloudFormation: DMS instance not publicly accessible\nResources:\n <example_resource_name>:\n Type: AWS::DMS::ReplicationInstance\n Properties:\n ReplicationInstanceClass: dms.t3.micro\n PubliclyAccessible: false # Critical: disables public access to prevent Internet exposure\n```",
"Other": "1. In the AWS Console, open Database Migration Service > Replication instances and select the instance\n2. In Details > Networking, click each attached Security Group ID to open it in the EC2 console\n3. In Inbound rules, delete any rule with Source 0.0.0.0/0 or ::/0\n4. Save rules for each security group",
"Terraform": "```hcl\n# DMS instance not publicly accessible\nresource \"aws_dms_replication_instance\" \"<example_resource_name>\" {\n replication_instance_id = \"<example_resource_id>\"\n replication_instance_class = \"dms.t3.micro\"\n publicly_accessible = false # Critical: disables public access to prevent Internet exposure\n}\n```"
},
"Recommendation": {
"Text": "Restrict DMS Replication instances security groups to only required IPs, or re-create these instances that is only accessible privately.",
"Url": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-1"
"Text": "Adopt a **private-only** design:\n- Disable `PubliclyAccessible`; place instances in private subnets.\n- Enforce **least privilege** security groups (no `0.0.0.0/0`); allow only required sources/ports.\n- Provide access via **VPN**, peering, or Direct Connect.\n- Layer controls (ACLs, monitoring) and restrict IAM to necessary actions.",
"Url": "https://hub.prowler.com/check/dms_instance_no_public_access"
}
},
"Categories": [
@@ -1,31 +1,39 @@
{
"Provider": "aws",
"CheckID": "dms_replication_task_source_logging_enabled",
"CheckTitle": "Check if DMS replication tasks for the source database have logging enabled.",
"CheckTitle": "DMS replication task has logging enabled and SOURCE_CAPTURE and SOURCE_UNLOAD components set to at least Default severity",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices",
"TTPs/Defense Evasion"
],
"ServiceName": "dms",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:dms:region:account-id:task/task-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsDmsReplicationTask",
"Description": "This control checks whether logging is enabled with the minimum severity level of LOGGER_SEVERITY_DEFAULT for DMS replication tasks SOURCE_CAPTURE and SOURCE_UNLOAD. The control fails if logging isn't enabled for these tasks or if the minimum severity level is less than LOGGER_SEVERITY_DEFAULT.",
"Risk": "Without logging enabled, issues in data migration may go undetected, affecting the integrity and compliance of replicated data.",
"RelatedUrl": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Monitoring.html#CHAP_Monitoring.ManagingLogs",
"Description": "**AWS DMS replication tasks** have **logging enabled** and configure `SOURCE_CAPTURE` and `SOURCE_UNLOAD` with severity at least `LOGGER_SEVERITY_DEFAULT` (or higher: `LOGGER_SEVERITY_DEBUG`, `LOGGER_SEVERITY_DETAILED_DEBUG`).",
"Risk": "Missing or low-severity source logs hinder visibility into **CDC** and full-load activity, risking undetected errors, stalls, or tampering. This can cause silent **data drift**, broken lineage, and failed recoveries, undermining **integrity** and **availability** and weakening auditability during investigations.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Monitoring.html",
"https://repost.aws/knowledge-center/dms-debug-logging",
"https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-8"
],
"Remediation": {
"Code": {
"CLI": "aws dms modify-replication-task --replication-task-arn <task-arn> --task-settings '{\"Logging\":{\"EnableLogging\":true,\"LogComponents\":[{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]}}'",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-8",
"Terraform": ""
"CLI": "aws dms modify-replication-task --replication-task-arn <example_resource_arn> --replication-task-settings '{\"Logging\":{\"EnableLogging\":true,\"LogComponents\":[{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]}}'",
"NativeIaC": "```yaml\n# CloudFormation: enable DMS source logging at minimum DEFAULT severity\nResources:\n <example_resource_name>:\n Type: AWS::DMS::ReplicationTask\n Properties:\n ReplicationInstanceArn: <example_resource_arn>\n SourceEndpointArn: <example_resource_arn>\n TargetEndpointArn: <example_resource_arn>\n MigrationType: full-load\n TableMappings: '{\"rules\":[]}'\n # Critical: Enables logging and sets SOURCE components to at least DEFAULT\n ReplicationTaskSettings: |\n {\n \"Logging\": {\n \"EnableLogging\": true,\n \"LogComponents\": [\n {\"Id\": \"SOURCE_CAPTURE\", \"Severity\": \"LOGGER_SEVERITY_DEFAULT\"},\n {\"Id\": \"SOURCE_UNLOAD\", \"Severity\": \"LOGGER_SEVERITY_DEFAULT\"}\n ]\n }\n }\n```",
"Other": "1. In the AWS console, go to Database Migration Service > Database migration tasks\n2. Select the task and choose Modify\n3. Click Modify task logging\n4. Turn on Enable logging\n5. For SOURCE_CAPTURE and SOURCE_UNLOAD, set Severity to Default (or higher)\n6. Save/Modify to apply",
"Terraform": "```hcl\n# Enable DMS source logging at minimum DEFAULT severity\nresource \"aws_dms_replication_task\" \"<example_resource_name>\" {\n replication_instance_arn = \"<example_resource_arn>\"\n source_endpoint_arn = \"<example_resource_arn>\"\n target_endpoint_arn = \"<example_resource_arn>\"\n migration_type = \"full-load\"\n table_mappings = \"{\\\"rules\\\":[]}\"\n\n # Critical: Enables logging and sets SOURCE components to at least DEFAULT\n replication_task_settings = <<JSON\n{\n \"Logging\": {\n \"EnableLogging\": true,\n \"LogComponents\": [\n {\"Id\": \"SOURCE_CAPTURE\", \"Severity\": \"LOGGER_SEVERITY_DEFAULT\"},\n {\"Id\": \"SOURCE_UNLOAD\", \"Severity\": \"LOGGER_SEVERITY_DEFAULT\"}\n ]\n }\n}\nJSON\n}\n```"
},
"Recommendation": {
"Text": "Enable logging for source database DMS replication tasks with a minimum severity level of LOGGER_SEVERITY_DEFAULT.",
"Url": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.Logging.html"
"Text": "Enable and standardize **task logging** for `SOURCE_CAPTURE` and `SOURCE_UNLOAD` at `LOGGER_SEVERITY_DEFAULT` or higher.\n- Centralize logs and alert on anomalies\n- Enforce **least privilege** for log access\n- Set retention to support audits\n- Avoid prolonged `DEBUG` levels, *except during troubleshooting*, to balance visibility and cost",
"Url": "https://hub.prowler.com/check/dms_replication_task_source_logging_enabled"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,31 +1,40 @@
{
"Provider": "aws",
"CheckID": "dms_replication_task_target_logging_enabled",
"CheckTitle": "Check if DMS replication tasks for the target database have logging enabled.",
"CheckTitle": "DMS replication task has TARGET_APPLY and TARGET_LOAD logging enabled with at least default severity",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
"Software and Configuration Checks/AWS Security Best Practices",
"TTPs/Defense Evasion"
],
"ServiceName": "dms",
"SubServiceName": "",
"ResourceIdTemplate": "arn:aws:dms:region:account-id:task/task-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsDmsReplicationTask",
"Description": "This control checks whether logging is enabled with the minimum severity level of LOGGER_SEVERITY_DEFAULT for DMS replication tasks TARGET_APPLY and TARGET_LOAD. The control fails if logging isn't enabled for these tasks or if the minimum severity level is less than LOGGER_SEVERITY_DEFAULT.",
"Risk": "Without logging enabled, issues in data migration may go undetected, affecting the integrity and compliance of replicated data.",
"RelatedUrl": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Monitoring.html#CHAP_Monitoring.ManagingLogs",
"Description": "**AWS DMS replication tasks** have target logging enabled, including `TARGET_APPLY` and `TARGET_LOAD`, each set to at least `LOGGER_SEVERITY_DEFAULT`.",
"Risk": "Insufficient target logging limits visibility into load/apply activity, masking failures and anomalies. This risks **data integrity** (silent drift, partial loads) and **availability** (longer incident resolution), and reduces **auditability** of migration events.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://repost.aws/knowledge-center/dms-debug-logging",
"https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.Logging.html",
"https://stackoverflow.com/questions/46913913/aws-dms-with-cloudformation-enabling-logging-needs-a-log-group",
"https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-7"
],
"Remediation": {
"Code": {
"CLI": "aws dms modify-replication-task --replication-task-arn <task-arn> --task-settings '{\"Logging\":{\"EnableLogging\":true,\"LogComponents\":[{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]}}'",
"NativeIaC": "",
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/dms-controls.html#dms-7",
"Terraform": ""
"CLI": "aws dms modify-replication-task --replication-task-arn <task-arn> --replication-task-settings '{\"Logging\":{\"EnableLogging\":true,\"LogComponents\":[{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]}}'",
"NativeIaC": "```yaml\n# CloudFormation: enable DMS task logging for target components\nResources:\n <example_resource_name>:\n Type: AWS::DMS::ReplicationTask\n Properties:\n ReplicationInstanceArn: <example_resource_arn>\n SourceEndpointArn: <example_resource_arn>\n TargetEndpointArn: <example_resource_arn>\n MigrationType: full-load\n TableMappings: |\n {\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%\",\"table-name\":\"%\"},\"rule-action\":\"include\"}]}\n ReplicationTaskSettings: |\n {\"Logging\":{\"EnableLogging\":true, \"LogComponents\":[\n {\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}, # Critical: ensure TARGET_APPLY logging at default\n {\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"} # Critical: ensure TARGET_LOAD logging at default\n ]}}\n```",
"Other": "1. Open the AWS DMS console and go to Database migration tasks\n2. Select the replication task and choose Modify\n3. Expand Task settings (JSON) or Logging\n4. Enable CloudWatch logs (EnableLogging = true)\n5. Set log components:\n - TARGET_APPLY severity: DEFAULT\n - TARGET_LOAD severity: DEFAULT\n6. Save changes (Modify task), then rerun the task if required",
"Terraform": "```hcl\n# Enable DMS task logging for target components\nresource \"aws_dms_replication_task\" \"<example_resource_name>\" {\n replication_task_id = \"<example_resource_id>\"\n replication_instance_arn = \"<example_resource_arn>\"\n source_endpoint_arn = \"<example_resource_arn>\"\n target_endpoint_arn = \"<example_resource_arn>\"\n migration_type = \"full-load\"\n table_mappings = jsonencode({ rules = [{\n \"rule-type\" : \"selection\", \"rule-id\" : \"1\", \"rule-name\" : \"1\",\n \"object-locator\" : { \"schema-name\" : \"%\", \"table-name\" : \"%\" },\n \"rule-action\" : \"include\"\n }]} )\n\n # Critical: enables logging and sets TARGET_APPLY and TARGET_LOAD to minimum required severity\n replication_task_settings = jsonencode({\n Logging = {\n EnableLogging = true\n LogComponents = [\n { Id = \"TARGET_APPLY\", Severity = \"LOGGER_SEVERITY_DEFAULT\" },\n { Id = \"TARGET_LOAD\", Severity = \"LOGGER_SEVERITY_DEFAULT\" }\n ]\n }\n })\n}\n```"
},
"Recommendation": {
"Text": "Enable logging for target database DMS replication tasks with a minimum severity level of LOGGER_SEVERITY_DEFAULT.",
"Url": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.Logging.html"
"Text": "Enable and maintain **CloudWatch logging** at `LOGGER_SEVERITY_DEFAULT` or higher for target components:\n- Configure `TARGET_APPLY` and `TARGET_LOAD`\n- Enforce least-privilege log access\n- Monitor logs/alerts for anomalies\n- Standardize task settings and validate data for **defense in depth**",
"Url": "https://hub.prowler.com/check/dms_replication_task_target_logging_enabled"
}
},
"Categories": [],
"Categories": [
"logging"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
+21
View File
@@ -0,0 +1,21 @@
{
"$schema": "https://ui.shadcn.com/schema.json",
"style": "default",
"rsc": true,
"tsx": true,
"tailwind": {
"config": "",
"css": "styles/globals.css",
"baseColor": "neutral",
"cssVariables": true,
"prefix": ""
},
"aliases": {
"components": "@/components",
"utils": "@/lib/utils",
"ui": "@/components/shadcn",
"lib": "@/lib",
"hooks": "@/hooks"
},
"iconLibrary": "lucide"
}
+57
View File
@@ -0,0 +1,57 @@
# shadcn Components
This directory contains all shadcn/ui based components for the Prowler application.
## Directory Structure
```
shadcn/
├── card.tsx # shadcn Card component
├── resource-stats-card/ # Custom ResourceStatsCard built on shadcn
│ ├── resource-stats-card.tsx
│ ├── resource-stats-card.example.tsx
│ └── index.ts
├── index.ts # Barrel exports
└── README.md
```
## Usage
All shadcn components can be imported from `@/components/shadcn`:
```tsx
import { Card, CardHeader, CardContent } from "@/components/shadcn";
import { ResourceStatsCard } from "@/components/shadcn";
```
## Adding New shadcn Components
When adding new shadcn components using the CLI:
```bash
npx shadcn@latest add [component-name]
```
The component will be automatically added to this directory due to the configuration in `components.json`:
```json
{
"aliases": {
"ui": "@/components/shadcn"
}
}
```
## Component Guidelines
1. **shadcn base components** - Use as-is from shadcn/ui (e.g., `card.tsx`)
2. **Custom components built on shadcn** - Create in subdirectories (e.g., `resource-stats-card/`)
3. **CVA variants** - Use Class Variance Authority for type-safe variants
4. **Theme support** - Include `dark:` classes for dark/light theme compatibility
5. **TypeScript** - Always export types and use proper typing
## Resources
- [shadcn/ui Documentation](https://ui.shadcn.com)
- [CVA Documentation](https://cva.style/docs)
- [Tailwind CSS Documentation](https://tailwindcss.com/docs)
+92
View File
@@ -0,0 +1,92 @@
import * as React from "react";
import { cn } from "@/lib/utils";
/**
 * Root card surface: a rounded, bordered container laid out as a vertical
 * flex column. Accepts all standard <div> props; `className` is merged with
 * the defaults via `cn`.
 */
const Card = ({ className, ...props }: React.ComponentProps<"div">) => {
  const classes = cn(
    "bg-card text-card-foreground flex flex-col gap-6 rounded-xl border py-6 shadow-sm",
    className,
  );
  return <div data-slot="card" className={classes} {...props} />;
};
/**
 * Card header region: a container-query grid that reserves a second column
 * when a [data-slot=card-action] child is present. `className` is merged
 * with the defaults via `cn`.
 */
const CardHeader = ({ className, ...props }: React.ComponentProps<"div">) => {
  const classes = cn(
    "@container/card-header grid auto-rows-min grid-rows-[auto_auto] items-start gap-2 px-6 has-data-[slot=card-action]:grid-cols-[1fr_auto] [.border-b]:pb-6",
    className,
  );
  return <div data-slot="card-header" className={classes} {...props} />;
};
/** Card title text: semibold, tight leading. Rendered as a plain <div>. */
const CardTitle = ({ className, ...props }: React.ComponentProps<"div">) => (
  <div
    data-slot="card-title"
    className={cn("leading-none font-semibold", className)}
    {...props}
  />
);
/** Secondary descriptive text under the card title, in muted small type. */
const CardDescription = ({
  className,
  ...props
}: React.ComponentProps<"div">) => (
  <div
    data-slot="card-description"
    className={cn("text-muted-foreground text-sm", className)}
    {...props}
  />
);
/**
 * Action slot placed in the header's second grid column (top-right corner).
 * CardHeader detects this element via its data-slot attribute to switch to a
 * two-column layout.
 */
const CardAction = ({ className, ...props }: React.ComponentProps<"div">) => {
  const classes = cn(
    "col-start-2 row-span-2 row-start-1 self-start justify-self-end",
    className,
  );
  return <div data-slot="card-action" className={classes} {...props} />;
};
/** Main card body: only applies horizontal padding; layout is up to children. */
const CardContent = ({ className, ...props }: React.ComponentProps<"div">) => (
  <div data-slot="card-content" className={cn("px-6", className)} {...props} />
);
/** Card footer: horizontal flex row; gains top padding when given a border-t. */
const CardFooter = ({ className, ...props }: React.ComponentProps<"div">) => (
  <div
    data-slot="card-footer"
    className={cn("flex items-center px-6 [.border-t]:pt-6", className)}
    {...props}
  />
);
// Public API of the shadcn Card primitives (alphabetical).
export {
  Card,
  CardAction,
  CardContent,
  CardDescription,
  CardFooter,
  CardHeader,
  CardTitle,
};
+21
View File
@@ -0,0 +1,21 @@
// Re-export the shadcn Card primitives. CardAction is part of card.tsx's
// public export list and is included here so consumers can import it from
// the "@/components/shadcn" barrel as the README instructs.
export {
  Card,
  CardAction,
  CardContent,
  CardDescription,
  CardFooter,
  CardHeader,
  CardTitle,
} from "./card";
export {
ResourceStatsCard,
ResourceStatsCardContainer,
type ResourceStatsCardContainerProps,
ResourceStatsCardContent,
type ResourceStatsCardContentProps,
ResourceStatsCardDivider,
type ResourceStatsCardDividerProps,
ResourceStatsCardHeader,
type ResourceStatsCardHeaderProps,
type ResourceStatsCardProps,
type StatItem,
} from "./resource-stats-card";
@@ -0,0 +1,13 @@
// Barrel for the ResourceStatsCard family: re-exports each component and its
// prop types so consumers import from one path.
export type { ResourceStatsCardProps } from "./resource-stats-card";
export { ResourceStatsCard } from "./resource-stats-card";
export type { ResourceStatsCardContainerProps } from "./resource-stats-card-container";
export { ResourceStatsCardContainer } from "./resource-stats-card-container";
export type {
  ResourceStatsCardContentProps,
  StatItem,
} from "./resource-stats-card-content";
// CardVariant is both a value (enum-like const) and a type in the content
// module; consumers need it to populate `badge.variant`, so re-export it here.
export { CardVariant, ResourceStatsCardContent } from "./resource-stats-card-content";
export type { ResourceStatsCardDividerProps } from "./resource-stats-card-divider";
export { ResourceStatsCardDivider } from "./resource-stats-card-divider";
export type { ResourceStatsCardHeaderProps } from "./resource-stats-card-header";
export { ResourceStatsCardHeader } from "./resource-stats-card-header";
@@ -0,0 +1,55 @@
import { cva, type VariantProps } from "class-variance-authority";
import { cn } from "@/lib/utils";
// Outer shell of a ResourceStats card: rounded, blurred, semi-transparent
// panel. The `padding` variant selects the inner spacing preset.
const shellStyles = cva(
  [
    "flex",
    "rounded-[12px]",
    "border",
    "backdrop-blur-[46px]",
    "border-[rgba(38,38,38,0.70)]",
    "bg-[rgba(23,23,23,0.50)]",
    "dark:border-[rgba(38,38,38,0.70)]",
    "dark:bg-[rgba(23,23,23,0.50)]",
  ],
  {
    variants: {
      padding: {
        sm: "px-3 py-2",
        md: "px-[19px] py-[9px]",
        lg: "px-6 py-3",
        none: "p-0",
      },
    },
    defaultVariants: { padding: "md" },
  },
);

/** Props: all <div> attributes plus the `padding` variant and an optional ref. */
export interface ResourceStatsCardContainerProps
  extends React.HTMLAttributes<HTMLDivElement>,
    VariantProps<typeof shellStyles> {
  // React 19 style ref-as-prop forwarded to the underlying <div>.
  ref?: React.Ref<HTMLDivElement>;
}

/** Renders the card shell and merges any caller-supplied `className` last. */
export const ResourceStatsCardContainer = ({
  className,
  children,
  padding,
  ref,
  ...props
}: ResourceStatsCardContainerProps) => (
  <div ref={ref} className={cn(shellStyles({ padding }), className)} {...props}>
    {children}
  </div>
);

ResourceStatsCardContainer.displayName = "ResourceStatsCardContainer";
@@ -0,0 +1,204 @@
import { cva } from "class-variance-authority";
import { LucideIcon } from "lucide-react";
import { cn } from "@/lib/utils";
/** A single icon + label row rendered in the stats list under the badge. */
export interface StatItem {
  // Lucide icon component rendered at the start of the row.
  icon: LucideIcon;
  // Text shown next to the icon.
  label: string;
}

/** Enum-like map of supported card variants (also exported as a type below). */
export const CardVariant = {
  default: "default",
  fail: "fail",
  pass: "pass",
  warning: "warning",
  info: "info",
} as const;

/** Union of the variant string literals: "default" | "fail" | "pass" | "warning" | "info". */
export type CardVariant = (typeof CardVariant)[keyof typeof CardVariant];

// Accent/foreground hex color per variant; used inline for the badge icon and
// count text, and as the default color of the vertical accent line.
const variantColors = {
  default: "#868994",
  fail: "#f54280",
  pass: "#4ade80",
  warning: "#fbbf24",
  info: "#60a5fa",
} as const;

// Internal alias: the valid keys of variantColors (same keys as CardVariant).
type BadgeVariant = keyof typeof variantColors;
// Badge pill container: `variant` picks the dark background tint that pairs
// with the matching variantColors foreground; `size` picks padding/font size.
const badgeVariants = cva(
  ["flex", "items-center", "justify-center", "gap-0.5", "rounded-full"],
  {
    variants: {
      variant: {
        [CardVariant.default]: "bg-[#535359]",
        [CardVariant.fail]: "bg-[#432232]",
        [CardVariant.pass]: "bg-[#204237]",
        [CardVariant.warning]: "bg-[#3d3520]",
        [CardVariant.info]: "bg-[#1e3a5f]",
      },
      size: {
        sm: "px-1 text-xs",
        md: "px-1.5 text-sm",
        lg: "px-2 text-base",
      },
    },
    defaultVariants: {
      variant: CardVariant.fail,
      size: "md",
    },
  },
);

// Dimensions of the icon inside the badge pill (color is applied inline).
const badgeIconVariants = cva("", {
  variants: {
    size: {
      sm: "h-2.5 w-2.5",
      md: "h-3 w-3",
      lg: "h-4 w-4",
    },
  },
  defaultVariants: {
    size: "md",
  },
});

// Label text rendered to the right of the badge.
const labelTextVariants = cva(
  "leading-6 font-semibold text-zinc-300 dark:text-zinc-300",
  {
    variants: {
      size: {
        sm: "text-xs",
        md: "text-sm",
        lg: "text-base",
      },
    },
    defaultVariants: {
      size: "md",
    },
  },
);

// Icon at the start of each stat row.
const statIconVariants = cva("text-zinc-300 dark:text-zinc-300", {
  variants: {
    size: {
      sm: "h-2.5 w-2.5",
      md: "h-3 w-3",
      lg: "h-3.5 w-3.5",
    },
  },
  defaultVariants: {
    size: "md",
  },
});

// Text of each stat row.
const statLabelVariants = cva(
  "leading-5 font-medium text-zinc-300 dark:text-zinc-300",
  {
    variants: {
      size: {
        sm: "text-xs",
        md: "text-sm",
        lg: "text-base",
      },
    },
    defaultVariants: {
      size: "md",
    },
  },
);
/** Props for ResourceStatsCardContent. */
export interface ResourceStatsCardContentProps
  extends React.HTMLAttributes<HTMLDivElement> {
  // Badge pill: icon, count text, and color variant (defaults to "fail").
  badge: {
    icon: LucideIcon;
    count: number | string;
    variant?: CardVariant;
  };
  // Label rendered to the right of the badge.
  label: string;
  // Optional icon/label rows shown below, next to a vertical accent line.
  stats?: StatItem[];
  // Overrides the accent line color; defaults to the badge variant's color.
  accentColor?: string;
  // Size token applied to the badge, icons, and text.
  size?: "sm" | "md" | "lg";
  // React 19 style ref-as-prop forwarded to the root <div>.
  ref?: React.Ref<HTMLDivElement>;
}
/**
 * Inner content of a resource stats card: a colored count badge with a label,
 * optionally followed by a list of icon/label stat rows set off by a vertical
 * accent line whose color tracks the badge variant (or `accentColor`).
 */
export const ResourceStatsCardContent = ({
  badge,
  label,
  stats = [],
  accentColor,
  size = "md",
  className,
  ref,
  ...props
}: ResourceStatsCardContentProps) => {
  // Alias to a capitalized name so it can be rendered as a JSX element.
  const BadgeIcon = badge.icon;
  // Fall back to "fail" when no variant is given (matches the cva default).
  const badgeVariant: BadgeVariant = badge.variant || "fail";
  // Determine accent line color: explicit override > variant color > neutral.
  const lineColor = accentColor || variantColors[badgeVariant] || "#d4d4d8";
  return (
    <div
      ref={ref}
      className={cn("flex flex-col gap-[5px]", className)}
      {...props}
    >
      {/* Badge and Label Row */}
      <div className="flex w-full items-center gap-1">
        {/* Badge */}
        <div className={cn(badgeVariants({ variant: badgeVariant, size }))}>
          <BadgeIcon
            className={badgeIconVariants({ size })}
            strokeWidth={2.5}
            style={{ color: variantColors[badgeVariant] }}
          />
          <span
            className="leading-6 font-bold"
            style={{ color: variantColors[badgeVariant] }}
          >
            {badge.count}
          </span>
        </div>
        {/* Label */}
        <span className={labelTextVariants({ size })}>{label}</span>
      </div>
      {/* Stats Section */}
      {stats.length > 0 && (
        <div className="flex w-full items-stretch gap-0">
          {/* Vertical Accent Line */}
          <div className="flex items-stretch px-3 py-1">
            <div
              className="w-px rounded-full"
              style={{ backgroundColor: lineColor }}
            />
          </div>
          {/* Stats List */}
          <div className="flex flex-1 flex-col gap-0.5">
            {stats.map((stat, index) => {
              const StatIcon = stat.icon;
              return (
                <div key={index} className="flex items-center gap-1">
                  <StatIcon
                    className={statIconVariants({ size })}
                    strokeWidth={2}
                  />
                  <span className={statLabelVariants({ size })}>
                    {stat.label}
                  </span>
                </div>
              );
            })}
          </div>
        </div>
      )}
    </div>
  );
};
ResourceStatsCardContent.displayName = "ResourceStatsCardContent";
@@ -0,0 +1,59 @@
import { cva, type VariantProps } from "class-variance-authority";
import { cn } from "@/lib/utils";
// Wrapper that centers the divider line and controls spacing along the
// perpendicular axis; orientation picks which axis the divider fills.
const dividerVariants = cva("flex items-center justify-center", {
  variants: {
    spacing: {
      sm: "px-2",
      md: "px-[23px]",
      lg: "px-8",
    },
    orientation: {
      vertical: "h-full",
      horizontal: "w-full",
    },
  },
  defaultVariants: {
    spacing: "md",
    orientation: "vertical",
  },
});
// The 1px divider line itself; sized to fill the chosen orientation.
const lineVariants = cva("bg-[rgba(39,39,42,1)]", {
  variants: {
    orientation: {
      vertical: "h-full w-px",
      horizontal: "w-full h-px",
    },
  },
  defaultVariants: {
    orientation: "vertical",
  },
});
// Props for ResourceStatsCardDivider; spacing and orientation come from
// the dividerVariants CVA config.
export interface ResourceStatsCardDividerProps
  extends React.HTMLAttributes<HTMLDivElement>,
    VariantProps<typeof dividerVariants> {
  // Ref forwarded to the root div.
  ref?: React.Ref<HTMLDivElement>;
}
// Thin divider line for separating sections inside a resource stats card.
export const ResourceStatsCardDivider = ({
  className,
  spacing,
  orientation,
  ref,
  ...props
}: ResourceStatsCardDividerProps) => {
  // Outer wrapper handles spacing and axis; the inner div draws the 1px line.
  const wrapperClasses = cn(
    dividerVariants({ spacing, orientation }),
    className,
  );
  return (
    <div ref={ref} className={wrapperClasses} {...props}>
      <div className={lineVariants({ orientation })} />
    </div>
  );
};
ResourceStatsCardDivider.displayName = "ResourceStatsCardDivider";
@@ -0,0 +1,103 @@
import { cva, type VariantProps } from "class-variance-authority";
import { LucideIcon } from "lucide-react";
import { cn } from "@/lib/utils";
// Header row layout; size variants are currently empty but kept so callers
// can pass a size consistently with the other header CVA configs.
const headerVariants = cva("flex w-full items-center gap-1", {
  variants: {
    size: {
      sm: "",
      md: "",
      lg: "",
    },
  },
  defaultVariants: {
    size: "md",
  },
});
// Leading icon of the header.
const iconVariants = cva("text-zinc-300 dark:text-zinc-300", {
  variants: {
    size: {
      sm: "h-3.5 w-3.5",
      md: "h-4 w-4",
      lg: "h-5 w-5",
    },
  },
  defaultVariants: {
    size: "md",
  },
});
// Header title text.
const titleVariants = cva(
  "leading-7 font-semibold text-zinc-300 dark:text-zinc-300",
  {
    variants: {
      size: {
        sm: "text-sm",
        md: "text-base",
        lg: "text-lg",
      },
    },
    defaultVariants: {
      size: "md",
    },
  },
);
// Right-aligned resource count text.
const countVariants = cva(
  "leading-4 font-normal text-zinc-300 dark:text-zinc-300",
  {
    variants: {
      size: {
        sm: "text-[9px]",
        md: "text-[10px]",
        lg: "text-xs",
      },
    },
    defaultVariants: {
      size: "md",
    },
  },
);
// Props for ResourceStatsCardHeader: leading icon + title, with an optional
// right-aligned resource count.
export interface ResourceStatsCardHeaderProps
  extends React.HTMLAttributes<HTMLDivElement>,
    VariantProps<typeof headerVariants> {
  // Icon rendered before the title.
  icon: LucideIcon;
  // Header title text.
  title: string;
  // Numbers are suffixed with "Resources"; strings are rendered verbatim.
  resourceCount?: number | string;
  // Ref forwarded to the root div.
  ref?: React.Ref<HTMLDivElement>;
}
// Header row for a resource stats card: leading icon + title on the left,
// optional resource count on the right.
export const ResourceStatsCardHeader = ({
  icon: Icon,
  title,
  resourceCount,
  size = "md",
  className,
  ref,
  ...props
}: ResourceStatsCardHeaderProps) => {
  // Numeric counts get a correctly pluralized suffix ("1 Resource" /
  // "2 Resources" — previously always "Resources"); string counts are
  // rendered verbatim so callers keep full control over the text.
  const countLabel =
    typeof resourceCount === "number"
      ? `${resourceCount} ${resourceCount === 1 ? "Resource" : "Resources"}`
      : resourceCount;
  return (
    <div
      ref={ref}
      className={cn(headerVariants({ size }), className)}
      {...props}
    >
      <div className="flex flex-1 items-center gap-1">
        <Icon className={iconVariants({ size })} strokeWidth={2} />
        <span className={titleVariants({ size })}>{title}</span>
      </div>
      {resourceCount !== undefined && (
        <span className={countVariants({ size })}>{countLabel}</span>
      )}
    </div>
  );
};
ResourceStatsCardHeader.displayName = "ResourceStatsCardHeader";
@@ -0,0 +1,164 @@
import { cva, type VariantProps } from "class-variance-authority";
import { LucideIcon } from "lucide-react";
import { cn } from "@/lib/utils";
import { ResourceStatsCardContainer } from "./resource-stats-card-container";
import type { StatItem } from "./resource-stats-card-content";
import {
CardVariant,
ResourceStatsCardContent,
} from "./resource-stats-card-content";
import { ResourceStatsCardHeader } from "./resource-stats-card-header";
export type { StatItem };
// Todo: when the design system is ready, we must use the colors from the design system (semantic colors)
// Variant styles using CVA for type safety and consistency
// Colors are exact HEX values from Figma design system
const cardVariants = cva("", {
  variants: {
    variant: {
      [CardVariant.default]: "",
      // Fail variant - rgba(67,34,50) from Figma
      [CardVariant.fail]:
        "border-[rgba(67,34,50,0.5)] bg-[rgba(67,34,50,0.2)] dark:border-[rgba(67,34,50,0.7)] dark:bg-[rgba(67,34,50,0.3)]",
      // Pass variant - rgba(32,66,55) from Figma
      [CardVariant.pass]:
        "border-[rgba(32,66,55,0.5)] bg-[rgba(32,66,55,0.2)] dark:border-[rgba(32,66,55,0.7)] dark:bg-[rgba(32,66,55,0.3)]",
      // Warning variant - rgba(61,53,32) from Figma
      [CardVariant.warning]:
        "border-[rgba(61,53,32,0.5)] bg-[rgba(61,53,32,0.2)] dark:border-[rgba(61,53,32,0.7)] dark:bg-[rgba(61,53,32,0.3)]",
      // Info variant - rgba(30,58,95) from Figma
      [CardVariant.info]:
        "border-[rgba(30,58,95,0.5)] bg-[rgba(30,58,95,0.2)] dark:border-[rgba(30,58,95,0.7)] dark:bg-[rgba(30,58,95,0.3)]",
    },
    // Padding and internal gap per card size.
    size: {
      sm: "px-2 py-1.5 gap-1",
      md: "px-3 py-2 gap-2",
      lg: "px-4 py-3 gap-3",
    },
  },
  defaultVariants: {
    variant: CardVariant.default,
    size: "md",
  },
});
// Props for ResourceStatsCard, the composed card (optional header + either an
// empty-state message or badge/label/stats content).
export interface ResourceStatsCardProps
  extends Omit<React.HTMLAttributes<HTMLDivElement>, "color">,
    VariantProps<typeof cardVariants> {
  // Optional header (icon + title + resource count)
  header?: {
    icon: LucideIcon;
    title: string;
    resourceCount?: number | string;
  };
  // Empty state message (when there's no data to display); takes precedence
  // over badge/label/stats when provided.
  emptyState?: {
    message: string;
  };
  // Main badge (top section) - optional when using empty state
  badge?: {
    icon: LucideIcon;
    count: number | string;
    variant?: CardVariant;
  };
  // Main label - optional when using empty state
  label?: string;
  // Vertical accent line color (optional, auto-determined from variant)
  accentColor?: string;
  // Sub-statistics array (flexible items)
  stats?: StatItem[];
  // Render without container (no border, background, padding) - useful for composing multiple cards in a custom container
  containerless?: boolean;
  // Ref for the root element
  ref?: React.Ref<HTMLDivElement>;
}
// Composed resource stats card: optional header, then either an empty-state
// message or the badge/label/stats content, inside a styled container unless
// `containerless` is set.
export const ResourceStatsCard = ({
  header,
  emptyState,
  badge,
  label,
  accentColor,
  stats = [],
  variant = CardVariant.default,
  size = "md",
  containerless = false,
  className,
  ref,
  ...props
}: ResourceStatsCardProps) => {
  // Resolve size to ensure it's not null (CVA can return null but we need a defined value)
  const resolvedSize = size || "md";
  // Inner content is identical for both render paths; build it once instead of
  // duplicating the JSX in the containerless and container branches.
  const content = (
    <>
      {header && <ResourceStatsCardHeader {...header} size={resolvedSize} />}
      {emptyState ? (
        <div className="flex h-[51px] w-full flex-col items-center justify-center">
          <p className="text-center text-sm leading-5 font-medium text-zinc-300 dark:text-zinc-300">
            {emptyState.message}
          </p>
        </div>
      ) : (
        badge &&
        label && (
          <ResourceStatsCardContent
            badge={badge}
            label={label}
            stats={stats}
            accentColor={accentColor}
            size={resolvedSize}
          />
        )
      )}
    </>
  );
  // Containerless: plain column layout with no border/background/padding,
  // useful when composing several cards inside a custom container.
  if (containerless) {
    return (
      <div
        ref={ref}
        className={cn("flex flex-col gap-[5px]", className)}
        {...props}
      >
        {content}
      </div>
    );
  }
  // Default: wrap in the styled container with variant colors and size padding.
  return (
    <ResourceStatsCardContainer
      ref={ref}
      className={cn(cardVariants({ variant, size }), "flex-col", className)}
      {...props}
    >
      {content}
    </ResourceStatsCardContainer>
  );
};
ResourceStatsCard.displayName = "ResourceStatsCard";
+8
View File
@@ -399,6 +399,14 @@
"strategy": "installed",
"generatedAt": "2025-09-10T11:50:17.548Z"
},
{
"section": "dependencies",
"name": "tw-animate-css",
"from": "1.4.0",
"to": "1.4.0",
"strategy": "installed",
"generatedAt": "2025-10-15T07:57:13.225Z"
},
{
"section": "dependencies",
"name": "uuid",
+394 -18
View File
File diff suppressed because it is too large Load Diff
+3 -2
View File
@@ -69,17 +69,17 @@
"recharts": "2.15.4",
"rss-parser": "3.13.0",
"server-only": "0.0.1",
"shadcn": "3.2.1",
"sharp": "0.33.5",
"tailwind-merge": "3.3.1",
"tailwindcss-animate": "1.0.7",
"tw-animate-css": "1.4.0",
"uuid": "11.1.0",
"zod": "4.1.11",
"zustand": "5.0.8"
},
"devDependencies": {
"@iconify/react": "5.2.1",
"@playwright/test": "1.53.2",
"@playwright/test": "1.56.1",
"@types/node": "20.5.7",
"@types/react": "19.1.13",
"@types/react-dom": "19.1.9",
@@ -105,6 +105,7 @@
"postcss": "8.4.38",
"prettier": "3.6.2",
"prettier-plugin-tailwindcss": "0.6.14",
"shadcn": "3.4.1",
"tailwind-variants": "0.1.20",
"tailwindcss": "4.1.13",
"typescript": "5.5.4"