Mirror of https://github.com/prowler-cloud/prowler.git, synced 2026-05-13 15:50:55 +00:00

Compare commits: 79 commits
| SHA1 |
|---|
| dd4636b5d3 |
| 39710a6841 |
| f330440c54 |
| c3940c7454 |
| df39f332e4 |
| 4a364d91be |
| 4b99c7b651 |
| c441423d6a |
| 7e7f160b9a |
| aaae73cd1c |
| c5e88f4a74 |
| 5d4415d090 |
| 5d840385df |
| f831171a21 |
| 2740d73fe7 |
| 1c906b37cd |
| 98056b7c85 |
| f15ef0d16c |
| c42ce6242f |
| 702d652de1 |
| fff02073cf |
| 23e3ea4a41 |
| f9afb50ed9 |
| 3b95aad6ce |
| ac5737d8c4 |
| a452c8c3eb |
| aa8be0b2fe |
| 46bf8e0fef |
| c0df0cd1a8 |
| 80d58a7b50 |
| 2c28d74598 |
| 4feab1be55 |
| 5bc9b09490 |
| fcf817618a |
| cad97f25ac |
| b854563854 |
| 573975f3fe |
| f4081f92a1 |
| 374496e7ff |
| 2a9c2b926d |
| f2f1e6bce6 |
| 25c823076f |
| 6ff559c0d4 |
| 899db55f56 |
| 22d801ade2 |
| 1dc6d41198 |
| 456712a0ef |
| 885ee62062 |
| bbeccaf085 |
| d1aca5641a |
| 3b7eba64aa |
| e9e0797642 |
| aaa5abdead |
| 0a2749b716 |
| 8f8bf63086 |
| ea27817a2c |
| 9068e6bcd0 |
| a4907d8098 |
| caee7830a5 |
| 65d2989bea |
| 6c34945829 |
| ce859ddd1f |
| 0ca059b45b |
| dad100b87a |
| 662296aa0e |
| b6d49416f0 |
| 42be77e82e |
| 63169289b0 |
| 43d310356d |
| 59ae503681 |
| bd62f56df4 |
| 90fbad16b9 |
| affd0c5ffb |
| 929bbe3550 |
| eb7ef4a8b9 |
| 017e19ac18 |
| be7680786a |
| efba5d2a8d |
| 44431a56de |
@@ -35,6 +35,8 @@ POSTGRES_DB=prowler_db
 # POSTGRES_REPLICA_USER=prowler
 # POSTGRES_REPLICA_PASSWORD=postgres
 # POSTGRES_REPLICA_DB=prowler_db
+# POSTGRES_REPLICA_MAX_ATTEMPTS=3
+# POSTGRES_REPLICA_RETRY_BASE_DELAY=0.5

 # Celery-Prowler task settings
 TASK_RETRY_DELAY_SECONDS=0.1
@@ -22,8 +22,8 @@ inputs:
 runs:
   using: 'composite'
   steps:
-    - name: Replace @master with current branch in pyproject.toml
-      if: github.event_name == 'pull_request' && github.base_ref == 'master'
+    - name: Replace @master with current branch in pyproject.toml (prowler repo only)
+      if: github.event_name == 'pull_request' && github.base_ref == 'master' && github.repository == 'prowler-cloud/prowler'
       shell: bash
       working-directory: ${{ inputs.working-directory }}
       run: |
@@ -37,8 +37,8 @@ runs:
         python -m pip install --upgrade pip
         pipx install poetry==${{ inputs.poetry-version }}

-    - name: Update SDK resolved_reference to latest commit
-      if: github.event_name == 'push' && github.ref == 'refs/heads/master'
+    - name: Update poetry.lock with latest Prowler commit
+      if: github.repository_owner == 'prowler-cloud' && github.repository != 'prowler-cloud/prowler'
       shell: bash
       working-directory: ${{ inputs.working-directory }}
       run: |
@@ -50,7 +50,21 @@ runs:
         echo "Updated resolved_reference:"
         grep -A2 -B2 "resolved_reference" poetry.lock

-    - name: Update poetry.lock
+    - name: Update SDK resolved_reference to latest commit (prowler repo on push)
+      if: github.event_name == 'push' && github.ref == 'refs/heads/master' && github.repository == 'prowler-cloud/prowler'
+      shell: bash
+      working-directory: ${{ inputs.working-directory }}
+      run: |
+        LATEST_COMMIT=$(curl -s "https://api.github.com/repos/prowler-cloud/prowler/commits/master" | jq -r '.sha')
+        echo "Latest commit hash: $LATEST_COMMIT"
+        sed -i '/url = "https:\/\/github\.com\/prowler-cloud\/prowler\.git"/,/resolved_reference = / {
+          s/resolved_reference = "[a-f0-9]\{40\}"/resolved_reference = "'"$LATEST_COMMIT"'"/
+        }' poetry.lock
+        echo "Updated resolved_reference:"
+        grep -A2 -B2 "resolved_reference" poetry.lock
+
+    - name: Update poetry.lock (prowler repo only)
+      if: github.repository == 'prowler-cloud/prowler'
       shell: bash
       working-directory: ${{ inputs.working-directory }}
       run: poetry lock
@@ -69,3 +83,11 @@ runs:
       run: |
         poetry install --no-root
         poetry run pip list
+
+    - name: Update Prowler Cloud API Client
+      if: github.repository_owner == 'prowler-cloud' && github.repository != 'prowler-cloud/prowler'
+      shell: bash
+      working-directory: ${{ inputs.working-directory }}
+      run: |
+        poetry remove prowler-cloud-api-client
+        poetry add ./prowler-cloud-api-client
@@ -0,0 +1,198 @@
# Slack Notification Action

A generic, flexible GitHub composite action for sending Slack notifications from JSON template files. It supports both standalone messages and message updates, with automatic status detection.

## Features

- **Template-based**: All messages use JSON template files for consistency
- **Automatic status detection**: Pass `step-outcome` to auto-calculate success/failure
- **Message updates**: Supports updating existing messages (using `chat.update`)
- **Simple API**: Clean and minimal interface
- **Reusable**: Works across all workflows and scenarios
- **Maintainable**: Centralized message templates

## Use Cases

1. **Container releases**: Track push start and completion with automatic status
2. **Deployments**: Track deployment progress with rich Block Kit formatting
3. **Custom notifications**: Any scenario where you need to notify Slack

## Inputs

| Input | Description | Required | Default |
|-------|-------------|----------|---------|
| `slack-bot-token` | Slack bot token for authentication | Yes | - |
| `payload-file-path` | Path to JSON file with the Slack message payload | Yes | - |
| `update-ts` | Message timestamp to update (leave empty for new messages) | No | `''` |
| `step-outcome` | Step outcome for automatic status detection (sets STATUS_EMOJI and STATUS_TEXT env vars) | No | `''` |

## Outputs

| Output | Description |
|--------|-------------|
| `ts` | Timestamp of the Slack message (use for updates) |

## Usage Examples

### Example 1: Container Release with Automatic Status Detection

Using JSON template files with automatic status detection:

```yaml
# Send start notification
- name: Notify container push started
  if: github.event_name == 'release'
  uses: ./.github/actions/slack-notification
  env:
    SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
    COMPONENT: API
    RELEASE_TAG: ${{ env.RELEASE_TAG }}
    GITHUB_SERVER_URL: ${{ github.server_url }}
    GITHUB_REPOSITORY: ${{ github.repository }}
    GITHUB_RUN_ID: ${{ github.run_id }}
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

# Do the work
- name: Build and push container
  if: github.event_name == 'release'
  id: container-push
  uses: docker/build-push-action@...
  with:
    push: true
    tags: ...

# Send completion notification with automatic status detection
- name: Notify container push completed
  if: github.event_name == 'release' && always()
  uses: ./.github/actions/slack-notification
  env:
    SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
    COMPONENT: API
    RELEASE_TAG: ${{ env.RELEASE_TAG }}
    GITHUB_SERVER_URL: ${{ github.server_url }}
    GITHUB_REPOSITORY: ${{ github.repository }}
    GITHUB_RUN_ID: ${{ github.run_id }}
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
    step-outcome: ${{ steps.container-push.outcome }}
```

**Benefits:**

- No status calculation needed in the workflow
- Reusable template files
- Clean and concise
- `STATUS_EMOJI` and `STATUS_TEXT` env vars are set automatically by the action
- Consistent message format across all workflows

### Example 2: Deployment with Message Update Pattern

```yaml
# Send initial deployment message
- name: Notify deployment started
  id: slack-start
  uses: ./.github/actions/slack-notification
  env:
    SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
    COMPONENT: API
    ENVIRONMENT: PRODUCTION
    COMMIT_HASH: ${{ github.sha }}
    VERSION_DEPLOYED: latest
    GITHUB_ACTOR: ${{ github.actor }}
    GITHUB_WORKFLOW: ${{ github.workflow }}
    GITHUB_SERVER_URL: ${{ github.server_url }}
    GITHUB_REPOSITORY: ${{ github.repository }}
    GITHUB_RUN_ID: ${{ github.run_id }}
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload-file-path: "./.github/scripts/slack-messages/deployment-started.json"

# Run deployment
- name: Deploy
  id: deploy
  run: terraform apply -auto-approve

# Determine additional status variables
- name: Determine deployment status
  if: always()
  id: deploy-status
  run: |
    if [[ "${{ steps.deploy.outcome }}" == "success" ]]; then
      echo "STATUS_COLOR=28a745" >> $GITHUB_ENV
      echo "STATUS=Completed" >> $GITHUB_ENV
    else
      echo "STATUS_COLOR=fc3434" >> $GITHUB_ENV
      echo "STATUS=Failed" >> $GITHUB_ENV
    fi

# Update the same message with final status
- name: Update deployment notification
  if: always()
  uses: ./.github/actions/slack-notification
  env:
    SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
    MESSAGE_TS: ${{ steps.slack-start.outputs.ts }}
    COMPONENT: API
    ENVIRONMENT: PRODUCTION
    COMMIT_HASH: ${{ github.sha }}
    VERSION_DEPLOYED: latest
    GITHUB_ACTOR: ${{ github.actor }}
    GITHUB_WORKFLOW: ${{ github.workflow }}
    GITHUB_SERVER_URL: ${{ github.server_url }}
    GITHUB_REPOSITORY: ${{ github.repository }}
    GITHUB_RUN_ID: ${{ github.run_id }}
    STATUS: ${{ env.STATUS }}
    STATUS_COLOR: ${{ env.STATUS_COLOR }}
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    update-ts: ${{ steps.slack-start.outputs.ts }}
    payload-file-path: "./.github/scripts/slack-messages/deployment-completed.json"
    step-outcome: ${{ steps.deploy.outcome }}
```

## Automatic Status Detection

When you provide the `step-outcome` input, the action automatically sets these environment variables:

| Outcome | STATUS_EMOJI | STATUS_TEXT |
|---------|--------------|-------------|
| success | `[✓]` | `completed successfully!` |
| failure | `[✗]` | `failed` |

These variables are then available in your payload template files.

## Template File Format

All template files must be valid JSON and support environment variable substitution. Example:

```json
{
  "channel": "$SLACK_CHANNEL_ID",
  "text": "$STATUS_EMOJI $COMPONENT container release $RELEASE_TAG push $STATUS_TEXT <$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID|View run>"
}
```

See available templates in [`.github/scripts/slack-messages/`](../../scripts/slack-messages/).
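To preview how a template renders before wiring it into a workflow, the substitution can be approximated locally with `envsubst` from GNU gettext (a sketch; every exported value below is a placeholder):

```bash
# Export placeholder values for each variable the template references
export SLACK_CHANNEL_ID="C0123456789"
export STATUS_EMOJI="[✓]" STATUS_TEXT="completed successfully!"
export COMPONENT="API" RELEASE_TAG="5.0.0"
export GITHUB_SERVER_URL="https://github.com"
export GITHUB_REPOSITORY="prowler-cloud/prowler"
export GITHUB_RUN_ID="1234567890"

# Substitute the variables and check that the result is still valid JSON
envsubst < .github/scripts/slack-messages/container-release-completed.json | jq .
```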
## Requirements

- Slack Bot Token with scopes: `chat:write`, `chat:write.public`
- Slack Channel ID where messages will be posted
- JSON template files for your messages

## Benefits

- **Consistency**: All notifications use standardized templates
- **Automatic status handling**: No need to calculate success/failure in workflows
- **Clean workflows**: Minimal boilerplate code
- **Reusable templates**: One template for all components
- **Easy to maintain**: Change a template once and it applies everywhere
- **Version controlled**: All message formats live in git

## Related Resources

- [Slack Block Kit Builder](https://app.slack.com/block-kit-builder)
- [Slack API Method Documentation](https://docs.slack.dev/tools/slack-github-action/sending-techniques/sending-data-slack-api-method/)
- [Message templates documentation](../../scripts/slack-messages/README.md)
@@ -0,0 +1,76 @@
name: 'Slack Notification'
description: 'Generic action to send Slack notifications with optional message updates and automatic status detection'
inputs:
  slack-bot-token:
    description: 'Slack bot token for authentication'
    required: true
  payload-file-path:
    description: 'Path to JSON file with the Slack message payload'
    required: true
  update-ts:
    description: 'Message timestamp to update (only for updates, leave empty for new messages)'
    required: false
    default: ''
  step-outcome:
    description: 'Outcome of a step to determine status (success/failure) - automatically sets STATUS_EMOJI, STATUS_TEXT, and STATUS_COLOR env vars'
    required: false
    default: ''
outputs:
  ts:
    description: 'Timestamp of the Slack message'
    value: ${{ steps.slack-notification.outputs.ts }}
runs:
  using: 'composite'
  steps:
    - name: Determine status
      id: status
      shell: bash
      run: |
        if [[ "${{ inputs.step-outcome }}" == "success" ]]; then
          echo "STATUS_EMOJI=[✓]" >> $GITHUB_ENV
          echo "STATUS_TEXT=succeeded" >> $GITHUB_ENV
          echo "STATUS_COLOR=6aa84f" >> $GITHUB_ENV
        elif [[ "${{ inputs.step-outcome }}" == "failure" ]]; then
          echo "STATUS_EMOJI=[✗]" >> $GITHUB_ENV
          echo "STATUS_TEXT=failed" >> $GITHUB_ENV
          echo "STATUS_COLOR=fc3434" >> $GITHUB_ENV
        else
          # No outcome provided - pending/in progress state
          echo "STATUS_COLOR=dbab09" >> $GITHUB_ENV
        fi

    - name: Send Slack notification (new message)
      if: inputs.update-ts == ''
      id: slack-notification-post
      uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
      env:
        SLACK_PAYLOAD_FILE_PATH: ${{ inputs.payload-file-path }}
      with:
        method: chat.postMessage
        token: ${{ inputs.slack-bot-token }}
        payload-file-path: ${{ inputs.payload-file-path }}
        payload-templated: true
        errors: true

    - name: Update Slack notification
      if: inputs.update-ts != ''
      id: slack-notification-update
      uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
      env:
        SLACK_PAYLOAD_FILE_PATH: ${{ inputs.payload-file-path }}
      with:
        method: chat.update
        token: ${{ inputs.slack-bot-token }}
        payload-file-path: ${{ inputs.payload-file-path }}
        payload-templated: true
        errors: true

    - name: Set output
      id: slack-notification
      shell: bash
      run: |
        if [[ "${{ inputs.update-ts }}" == "" ]]; then
          echo "ts=${{ steps.slack-notification-post.outputs.ts }}" >> $GITHUB_OUTPUT
        else
          echo "ts=${{ inputs.update-ts }}" >> $GITHUB_OUTPUT
        fi
@@ -45,22 +45,13 @@ outputs:
 runs:
   using: 'composite'
   steps:
-    - name: Run Trivy vulnerability scan (SARIF)
-      if: inputs.upload-sarif == 'true'
-      uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
+    - name: Cache Trivy vulnerability database
+      uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
       with:
-        image-ref: ${{ inputs.image-name }}:${{ inputs.image-tag }}
-        format: 'sarif'
-        output: 'trivy-results.sarif'
-        severity: 'CRITICAL,HIGH'
-        exit-code: '0'
-
-    - name: Upload Trivy results to GitHub Security tab
-      if: inputs.upload-sarif == 'true'
-      uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5
-      with:
-        sarif_file: 'trivy-results.sarif'
-        category: 'trivy-container'
+        path: ~/.cache/trivy
+        key: trivy-db-${{ runner.os }}-${{ github.run_id }}
+        restore-keys: |
+          trivy-db-${{ runner.os }}-

     - name: Run Trivy vulnerability scan (JSON)
       uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
@@ -70,6 +61,27 @@ runs:
         output: 'trivy-report.json'
         severity: ${{ inputs.severity }}
         exit-code: '0'
+        scanners: 'vuln'
+        timeout: '5m'

+    - name: Run Trivy vulnerability scan (SARIF)
+      if: inputs.upload-sarif == 'true' && github.event_name == 'push'
+      uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
+      with:
+        image-ref: ${{ inputs.image-name }}:${{ inputs.image-tag }}
+        format: 'sarif'
+        output: 'trivy-results.sarif'
+        severity: 'CRITICAL,HIGH'
+        exit-code: '0'
+        scanners: 'vuln'
+        timeout: '5m'
+
+    - name: Upload Trivy results to GitHub Security tab
+      if: inputs.upload-sarif == 'true' && github.event_name == 'push'
+      uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5
+      with:
+        sarif_file: 'trivy-results.sarif'
+        category: 'trivy-container'
+
     - name: Upload Trivy report artifact
       uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
@@ -109,20 +121,20 @@ runs:
       with:
         script: |
           const comment = require('./.github/scripts/trivy-pr-comment.js');

           // Unique identifier to find our comment
           const marker = '<!-- trivy-scan-comment:${{ inputs.image-name }} -->';
           const body = marker + '\n' + comment;

           // Find existing comment
           const { data: comments } = await github.rest.issues.listComments({
             owner: context.repo.owner,
             repo: context.repo.repo,
             issue_number: context.issue.number,
           });

           const existingComment = comments.find(c => c.body?.includes(marker));

           if (existingComment) {
             // Update existing comment
             await github.rest.issues.updateComment({
@@ -0,0 +1,462 @@
# Slack Message Templates

This directory contains reusable message templates for Slack notifications sent from GitHub Actions workflows.

## Usage

These JSON templates are used with the `slackapi/slack-github-action` action via the Slack API method (`chat.postMessage` and `chat.update`). All templates support rich Block Kit formatting and message updates.

### Available Templates

**Container Releases**

- `container-release-started.json`: Simple one-line notification when a container push starts
- `container-release-completed.json`: Simple one-line notification when a container release completes

**Deployments**

- `deployment-started.json`: Deployment start notification with Block Kit formatting
- `deployment-completed.json`: Deployment completion notification (updates the start message)

All templates use the Slack API method and require a Slack Bot Token.

## Setup Requirements

1. Create a Slack App (or use an existing one)
2. Add the Bot Token Scopes: `chat:write`, `chat:write.public`
3. Install the app to your workspace
4. Get the Bot Token from the OAuth & Permissions page
5. Add secrets:
   - `SLACK_BOT_TOKEN`: Your bot token
   - `SLACK_CHANNEL_ID`: The channel ID where messages will be posted

Reference: [Sending data using a Slack API method](https://docs.slack.dev/tools/slack-github-action/sending-techniques/sending-data-slack-api-method/)
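If you manage secrets from the command line, step 5 can also be done with the GitHub CLI (a sketch; both values below are placeholders):

```bash
# Store the bot token and channel ID as repository secrets (placeholder values)
gh secret set SLACK_BOT_TOKEN --body "xoxb-your-bot-token"
gh secret set SLACK_CHANNEL_ID --body "C0123456789"
```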
## Environment Variables

### Required Secrets (GitHub Secrets)

- `SLACK_BOT_TOKEN`: Passed as the `token` parameter to the action (not as an env variable)
- `SLACK_CHANNEL_ID`: Used in the payload as an env variable

### Container Release Variables (configured as env)

- `COMPONENT`: Component name (e.g., "API", "SDK", "UI", "MCP")
- `RELEASE_TAG` / `PROWLER_VERSION`: The release tag or version being deployed
- `GITHUB_SERVER_URL`: Provided by GitHub context
- `GITHUB_REPOSITORY`: Provided by GitHub context
- `GITHUB_RUN_ID`: Provided by GitHub context
- `STATUS_EMOJI`: Status symbol (calculated: `[✓]` for success, `[✗]` for failure)
- `STATUS_TEXT`: Status text (calculated: "completed successfully!" or "failed")

### Deployment Variables (configured as env)

- `COMPONENT`: Component name (e.g., "API", "SDK", "UI", "MCP")
- `ENVIRONMENT`: Environment name (e.g., "DEVELOPMENT", "PRODUCTION")
- `COMMIT_HASH`: Commit hash being deployed
- `VERSION_DEPLOYED`: Version being deployed
- `GITHUB_ACTOR`: User who triggered the workflow
- `GITHUB_WORKFLOW`: Workflow name
- `GITHUB_SERVER_URL`: Provided by GitHub context
- `GITHUB_REPOSITORY`: Provided by GitHub context
- `GITHUB_RUN_ID`: Provided by GitHub context

All other variables (`MESSAGE_TS`, `STATUS`, `STATUS_COLOR`, `STATUS_EMOJI`, etc.) are calculated internally within the workflow and should NOT be configured as environment variables.

## Example Workflow Usage

### Using the Generic Slack Notification Action (Recommended)

**Recommended approach**: Use the generic reusable action `.github/actions/slack-notification`, which provides maximum flexibility:

#### Example 1: Container Release (Start + Completion)

```yaml
# Send start notification
- name: Notify container push started
  if: github.event_name == 'release'
  uses: ./.github/actions/slack-notification
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload: |
      {
        "channel": "${{ secrets.SLACK_CHANNEL_ID }}",
        "text": "API container release ${{ env.RELEASE_TAG }} push started... <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|View run>"
      }

# Build and push container
- name: Build and push container
  if: github.event_name == 'release'
  id: container-push
  uses: docker/build-push-action@...
  with:
    push: true
    tags: ...

# Calculate status
- name: Determine push status
  if: github.event_name == 'release' && always()
  id: push-status
  run: |
    if [[ "${{ steps.container-push.outcome }}" == "success" ]]; then
      echo "emoji=[✓]" >> $GITHUB_OUTPUT
      echo "text=completed successfully!" >> $GITHUB_OUTPUT
    else
      echo "emoji=[✗]" >> $GITHUB_OUTPUT
      echo "text=failed" >> $GITHUB_OUTPUT
    fi

# Send completion notification
- name: Notify container push completed
  if: github.event_name == 'release' && always()
  uses: ./.github/actions/slack-notification
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload: |
      {
        "channel": "${{ secrets.SLACK_CHANNEL_ID }}",
        "text": "${{ steps.push-status.outputs.emoji }} API container release ${{ env.RELEASE_TAG }} push ${{ steps.push-status.outputs.text }} <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|View run>"
      }
```

#### Example 2: Simple One-Time Message

```yaml
- name: Send notification
  uses: ./.github/actions/slack-notification
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload: |
      {
        "channel": "${{ secrets.SLACK_CHANNEL_ID }}",
        "text": "Deployment completed successfully!"
      }
```

#### Example 3: Deployment with Message Update Pattern

```yaml
# Send initial deployment message
- name: Notify deployment started
  id: slack-start
  uses: ./.github/actions/slack-notification
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload: |
      {
        "channel": "${{ secrets.SLACK_CHANNEL_ID }}",
        "text": "API deployment to PRODUCTION started",
        "attachments": [
          {
            "color": "dbab09",
            "blocks": [
              {
                "type": "header",
                "text": {
                  "type": "plain_text",
                  "text": "API | Deployment to PRODUCTION"
                }
              },
              {
                "type": "section",
                "fields": [
                  {
                    "type": "mrkdwn",
                    "text": "*Status:*\nIn Progress"
                  }
                ]
              }
            ]
          }
        ]
      }

# Run deployment
- name: Deploy
  id: deploy
  run: terraform apply -auto-approve

# Calculate status
- name: Determine status
  if: always()
  id: status
  run: |
    if [[ "${{ steps.deploy.outcome }}" == "success" ]]; then
      echo "color=28a745" >> $GITHUB_OUTPUT
      echo "emoji=[✓]" >> $GITHUB_OUTPUT
      echo "status=Completed" >> $GITHUB_OUTPUT
    else
      echo "color=fc3434" >> $GITHUB_OUTPUT
      echo "emoji=[✗]" >> $GITHUB_OUTPUT
      echo "status=Failed" >> $GITHUB_OUTPUT
    fi

# Update the same message with final status
- name: Update deployment notification
  if: always()
  uses: ./.github/actions/slack-notification
  with:
    slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
    update-ts: ${{ steps.slack-start.outputs.ts }}
    payload: |
      {
        "channel": "${{ secrets.SLACK_CHANNEL_ID }}",
        "ts": "${{ steps.slack-start.outputs.ts }}",
        "text": "${{ steps.status.outputs.emoji }} API deployment to PRODUCTION ${{ steps.status.outputs.status }}",
        "attachments": [
          {
            "color": "${{ steps.status.outputs.color }}",
            "blocks": [
              {
                "type": "header",
                "text": {
                  "type": "plain_text",
                  "text": "API | Deployment to PRODUCTION"
                }
              },
              {
                "type": "section",
                "fields": [
                  {
                    "type": "mrkdwn",
                    "text": "*Status:*\n${{ steps.status.outputs.emoji }} ${{ steps.status.outputs.status }}"
                  }
                ]
              }
            ]
          }
        ]
      }
```

**Benefits of using the generic action:**

- Maximum flexibility: Build any payload you need directly in the workflow
- No template files needed: Everything inline
- Supports all scenarios: one-time messages, start/update patterns, rich Block Kit
- Easy to customize per use case
- Generic: Works for containers, deployments, or any notification type

For more details, see [Slack Notification Action](../../actions/slack-notification/README.md).

### Using Message Templates (Alternative Approach)

Simple one-line notifications for container releases:

```yaml
# Step 1: Notify when push starts
- name: Notify container push started
  if: github.event_name == 'release'
  uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
  env:
    SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
    COMPONENT: API
    RELEASE_TAG: ${{ env.RELEASE_TAG }}
    GITHUB_SERVER_URL: ${{ github.server_url }}
    GITHUB_REPOSITORY: ${{ github.repository }}
    GITHUB_RUN_ID: ${{ github.run_id }}
  with:
    method: chat.postMessage
    token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

# Step 2: Build and push container
- name: Build and push container
  id: container-push
  uses: docker/build-push-action@...
  with:
    push: true
    tags: ...

# Step 3: Determine push status
- name: Determine push status
  if: github.event_name == 'release' && always()
  id: push-status
  run: |
    if [[ "${{ steps.container-push.outcome }}" == "success" ]]; then
      echo "status-emoji=[✓]" >> $GITHUB_OUTPUT
      echo "status-text=completed successfully!" >> $GITHUB_OUTPUT
    else
      echo "status-emoji=[✗]" >> $GITHUB_OUTPUT
      echo "status-text=failed" >> $GITHUB_OUTPUT
    fi

# Step 4: Notify when push completes (success or failure)
- name: Notify container push completed
  if: github.event_name == 'release' && always()
  uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
  env:
    SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
    COMPONENT: API
    RELEASE_TAG: ${{ env.RELEASE_TAG }}
    GITHUB_SERVER_URL: ${{ github.server_url }}
    GITHUB_REPOSITORY: ${{ github.repository }}
    GITHUB_RUN_ID: ${{ github.run_id }}
    STATUS_EMOJI: ${{ steps.push-status.outputs.status-emoji }}
    STATUS_TEXT: ${{ steps.push-status.outputs.status-text }}
  with:
    method: chat.postMessage
    token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
```

### Deployment with Update Pattern

For deployments that start with one message and update it with the final status:

```yaml
# Step 1: Send deployment start notification
- name: Notify Deployment Start
  id: slack-notification-start
  uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
  env:
    SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
    COMPONENT: API
    ENVIRONMENT: PRODUCTION
    COMMIT_HASH: ${{ github.sha }}
    VERSION_DEPLOYED: latest
    GITHUB_ACTOR: ${{ github.actor }}
    GITHUB_WORKFLOW: ${{ github.workflow }}
    GITHUB_SERVER_URL: ${{ github.server_url }}
    GITHUB_REPOSITORY: ${{ github.repository }}
    GITHUB_RUN_ID: ${{ github.run_id }}
  with:
    method: chat.postMessage
    token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload-file-path: "./.github/scripts/slack-messages/deployment-started.json"

# Step 2: Run your deployment steps
- name: Terraform Plan
  id: terraform-plan
  run: terraform plan

- name: Terraform Apply
  id: terraform-apply
  run: terraform apply -auto-approve

# Step 3: Determine status (calculated internally, not configured)
- name: Determine Status
  if: always()
  id: determine-status
  run: |
    if [[ "${{ steps.terraform-apply.outcome }}" == "success" ]]; then
      echo "status=Completed" >> $GITHUB_OUTPUT
      echo "status-color=28a745" >> $GITHUB_OUTPUT
      echo "status-emoji=[✓]" >> $GITHUB_OUTPUT
      echo "plan-emoji=[✓]" >> $GITHUB_OUTPUT
      echo "apply-emoji=[✓]" >> $GITHUB_OUTPUT
    elif [[ "${{ steps.terraform-plan.outcome }}" == "failure" || "${{ steps.terraform-apply.outcome }}" == "failure" ]]; then
      echo "status=Failed" >> $GITHUB_OUTPUT
      echo "status-color=fc3434" >> $GITHUB_OUTPUT
      echo "status-emoji=[✗]" >> $GITHUB_OUTPUT
      if [[ "${{ steps.terraform-plan.outcome }}" == "failure" ]]; then
        echo "plan-emoji=[✗]" >> $GITHUB_OUTPUT
      else
        echo "plan-emoji=[✓]" >> $GITHUB_OUTPUT
      fi
      if [[ "${{ steps.terraform-apply.outcome }}" == "failure" ]]; then
        echo "apply-emoji=[✗]" >> $GITHUB_OUTPUT
      else
        echo "apply-emoji=[✓]" >> $GITHUB_OUTPUT
      fi
    else
      echo "status=Failed" >> $GITHUB_OUTPUT
      echo "status-color=fc3434" >> $GITHUB_OUTPUT
      echo "status-emoji=[✗]" >> $GITHUB_OUTPUT
      echo "plan-emoji=[?]" >> $GITHUB_OUTPUT
      echo "apply-emoji=[?]" >> $GITHUB_OUTPUT
    fi

# Step 4: Update the same Slack message (using calculated values)
- name: Notify Deployment Result
  if: always()
  uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1
  env:
    SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }}
    MESSAGE_TS: ${{ steps.slack-notification-start.outputs.ts }}
    COMPONENT: API
    ENVIRONMENT: PRODUCTION
    COMMIT_HASH: ${{ github.sha }}
    VERSION_DEPLOYED: latest
    GITHUB_ACTOR: ${{ github.actor }}
    GITHUB_WORKFLOW: ${{ github.workflow }}
    GITHUB_SERVER_URL: ${{ github.server_url }}
    GITHUB_REPOSITORY: ${{ github.repository }}
    GITHUB_RUN_ID: ${{ github.run_id }}
    STATUS: ${{ steps.determine-status.outputs.status }}
    STATUS_COLOR: ${{ steps.determine-status.outputs.status-color }}
    STATUS_EMOJI: ${{ steps.determine-status.outputs.status-emoji }}
    PLAN_EMOJI: ${{ steps.determine-status.outputs.plan-emoji }}
    APPLY_EMOJI: ${{ steps.determine-status.outputs.apply-emoji }}
    TERRAFORM_PLAN_OUTCOME: ${{ steps.terraform-plan.outcome }}
    TERRAFORM_APPLY_OUTCOME: ${{ steps.terraform-apply.outcome }}
  with:
    method: chat.update
    token: ${{ secrets.SLACK_BOT_TOKEN }}
    payload-file-path: "./.github/scripts/slack-messages/deployment-completed.json"
```

**Note**: Variables like `STATUS`, `STATUS_COLOR`, `STATUS_EMOJI`, `PLAN_EMOJI`, and `APPLY_EMOJI` are calculated by the `determine-status` step from the outcomes of previous steps. They should NOT be manually configured.

## Key Features

### Benefits of Using the Slack API Method

- **Rich Block Kit Formatting**: Full support for Slack's Block Kit, including headers, sections, fields, colors, and attachments
- **Message Updates**: Update the same message instead of posting multiple messages (using `chat.update` with `ts`)
- **Consistent Experience**: Same look and feel as Prowler Cloud notifications
- **Flexible**: Easy to customize message appearance by editing JSON templates

### Differences from Webhook Method

| Feature | webhook-trigger | Slack API (chat.postMessage) |
|---------|-----------------|------------------------------|
| Setup | Workflow Builder webhook | Slack Bot Token + Channel ID |
| Formatting | Plain text/simple | Full Block Kit support |
| Message Update | No | Yes (with chat.update) |
| Authentication | Webhook URL | Bot Token |
| Scopes Required | None | chat:write, chat:write.public |

## Message Appearance

### Container Release (Simple One-Line)

**Start message:**
```
API container release 4.5.0 push started... View run
```

**Completion message (success):**
```
[✓] API container release 4.5.0 push completed successfully! View run
```

**Completion message (failure):**
```
[✗] API container release 4.5.0 push failed View run
```

All messages are simple one-liners with a clickable "View run" link. The completion message shows success `[✓]` or failure `[✗]` based on the outcome of the container push.

### Deployment Start

- Header: Component and environment
- Yellow bar (color: `dbab09`)
- Status: In Progress
- Details: Commit, version, actor, workflow
- Link: Direct link to the deployment run

### Deployment Completion

- Header: Component and environment
- Green bar for success (color: `28a745`) / red bar for failure (color: `fc3434`)
- Status: [✓] Completed or [✗] Failed
- Details: All deployment info plus Terraform outcomes
- Link: Direct link to the deployment run

## Adding New Templates

1. Create a new JSON file with a Block Kit structure (see the sketch after this list)
2. Use environment variable placeholders (e.g., `$VAR_NAME`)
3. Include the required `channel` and `text` fields
4. Add `blocks` or `attachments` for rich formatting
5. For update templates, include a `ts` field as `$MESSAGE_TS`
6. Document the template in this README
7. Reference it in your workflow using `payload-file-path`
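As a sketch of what a new template might look like, here is a hypothetical update-capable template for a database migration notification; the `MIGRATION_NAME` variable and the scenario are illustrative, not existing templates:

```json
{
  "channel": "$SLACK_CHANNEL_ID",
  "ts": "$MESSAGE_TS",
  "text": "$STATUS_EMOJI $COMPONENT migration $MIGRATION_NAME $STATUS_TEXT",
  "attachments": [
    {
      "color": "$STATUS_COLOR",
      "blocks": [
        {
          "type": "section",
          "fields": [
            { "type": "mrkdwn", "text": "*Migration:*\n$MIGRATION_NAME" },
            { "type": "mrkdwn", "text": "*Status:*\n$STATUS_EMOJI $STATUS_TEXT" }
          ]
        }
      ]
    }
  ]
}
```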
## Reference

- [Slack Block Kit Builder](https://app.slack.com/block-kit-builder)
- [Slack API Method Documentation](https://docs.slack.dev/tools/slack-github-action/sending-techniques/sending-data-slack-api-method/)
@@ -0,0 +1,4 @@
{
  "channel": "${{ env.SLACK_CHANNEL_ID }}",
  "text": "${{ env.STATUS_EMOJI }} ${{ env.COMPONENT }} container release ${{ env.RELEASE_TAG }} push ${{ env.STATUS_TEXT }} <${{ env.GITHUB_SERVER_URL }}/${{ env.GITHUB_REPOSITORY }}/actions/runs/${{ env.GITHUB_RUN_ID }}|View run>"
}

@@ -0,0 +1,4 @@
{
  "channel": "${{ env.SLACK_CHANNEL_ID }}",
  "text": "${{ env.COMPONENT }} container release ${{ env.RELEASE_TAG }} push started... <${{ env.GITHUB_SERVER_URL }}/${{ env.GITHUB_REPOSITORY }}/actions/runs/${{ env.GITHUB_RUN_ID }}|View run>"
}
@@ -1,115 +0,0 @@
name: API - Build and Push containers

on:
  push:
    branches:
      - "master"
    paths:
      - "api/**"
      - "prowler/**"
      - ".github/workflows/api-build-lint-push-containers.yml"

  # Uncomment the code below to test this action on PRs
  # pull_request:
  #   branches:
  #     - "master"
  #   paths:
  #     - "api/**"
  #     - ".github/workflows/api-build-lint-push-containers.yml"

  release:
    types: [published]

env:
  # Tags
  LATEST_TAG: latest
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  STABLE_TAG: stable

  WORKING_DIRECTORY: ./api

  # Container Registries
  PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
  PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-api

jobs:
  repository-check:
    name: Repository check
    runs-on: ubuntu-latest
    outputs:
      is_repo: ${{ steps.repository_check.outputs.is_repo }}
    steps:
      - name: Repository check
        id: repository_check
        working-directory: /tmp
        run: |
          if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
          then
            echo "is_repo=true" >> "${GITHUB_OUTPUT}"
          else
            echo "This action only runs for prowler-cloud/prowler"
            echo "is_repo=false" >> "${GITHUB_OUTPUT}"
          fi

  # Build Prowler OSS container
  container-build-push:
    needs: repository-check
    if: needs.repository-check.outputs.is_repo == 'true'
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ${{ env.WORKING_DIRECTORY }}

    steps:
      - name: Checkout
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Set short git commit SHA
        id: vars
        run: |
          shortSha=$(git rev-parse --short ${{ github.sha }})
          echo "SHORT_SHA=${shortSha}" >> $GITHUB_ENV

      - name: Login to DockerHub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push container image (latest)
        # Comment the following line for testing
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          # Set push: false for testing
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.SHORT_SHA }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Build and push container image (release)
        if: github.event_name == 'release'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Trigger deployment
        if: github.event_name == 'push'
        uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.CLOUD_DISPATCH }}
          event-type: prowler-api-deploy
          client-payload: '{"sha": "${{ github.sha }}", "short_sha": "${{ env.SHORT_SHA }}"}'
@@ -0,0 +1,74 @@
name: 'API: Code Quality'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-code-quality.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-code-quality.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  API_WORKING_DIR: ./api

jobs:
  api-code-quality:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
    strategy:
      matrix:
        python-version:
          - '3.12'
    defaults:
      run:
        working-directory: ./api

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for API changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            api/docs/**
            api/README.md
            api/CHANGELOG.md

      - name: Setup Python with Poetry
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/setup-python-poetry
        with:
          python-version: ${{ matrix.python-version }}
          working-directory: ./api

      - name: Poetry check
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry check --lock

      - name: Ruff lint
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run ruff check . --exclude contrib

      - name: Ruff format
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run ruff format --check . --exclude contrib

      - name: Pylint
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run pylint --disable=W,C,R,E -j 0 -rn -sn src/
@@ -25,7 +25,7 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  analyze:
+  api-analyze:
     name: CodeQL Security Analysis
     runs-on: ubuntu-latest
     timeout-minutes: 30
@@ -0,0 +1,135 @@
name: 'API: Container Build and Push'

on:
  push:
    branches:
      - 'master'
    paths:
      - 'api/**'
      - 'prowler/**'
      - '.github/workflows/api-build-lint-push-containers.yml'
  release:
    types:
      - 'published'

permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false

env:
  # Tags
  LATEST_TAG: latest
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  STABLE_TAG: stable
  WORKING_DIRECTORY: ./api

  # Container registries
  PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
  PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-api

jobs:
  setup:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 5
    outputs:
      short-sha: ${{ steps.set-short-sha.outputs.short-sha }}
    steps:
      - name: Calculate short SHA
        id: set-short-sha
        run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT

  container-build-push:
    needs: setup
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Login to DockerHub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push API container (latest)
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: API
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Build and push API container (release)
        if: github.event_name == 'release'
        id: container-push
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: API
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  trigger-deployment:
    if: github.event_name == 'push'
    needs: [setup, container-build-push]
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read

    steps:
      - name: Trigger API deployment
        uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.CLOUD_DISPATCH }}
          event-type: api-prowler-deployment
          client-payload: '{"sha": "${{ github.sha }}", "short_sha": "${{ needs.setup.outputs.short-sha }}"}'
@@ -0,0 +1,94 @@
name: 'API: Container Checks'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-container-checks.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-container-checks.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  API_WORKING_DIR: ./api
  IMAGE_NAME: prowler-api

jobs:
  api-dockerfile-lint:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check if Dockerfile changed
        id: dockerfile-changed
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: api/Dockerfile

      - name: Lint Dockerfile with Hadolint
        if: steps.dockerfile-changed.outputs.any_changed == 'true'
        uses: hadolint/hadolint-action@2332a7b74a6de0dda2e2221d575162eba76ba5e5 # v3.3.0
        with:
          dockerfile: api/Dockerfile
          ignore: DL3013

  api-container-build-and-scan:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
      security-events: write
      pull-requests: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for API changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            api/docs/**
            api/README.md
            api/CHANGELOG.md

      - name: Set up Docker Buildx
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build container
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.API_WORKING_DIR }}
          push: false
          load: true
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Scan container with Trivy
        if: github.repository == 'prowler-cloud/prowler' && steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
@@ -1,228 +0,0 @@
|
||||
name: 'API: Pull Request'
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- 'v5.*'
|
||||
paths:
|
||||
- '.github/workflows/api-pull-request.yml'
|
||||
- 'api/**'
|
||||
- '!api/docs/**'
|
||||
- '!api/README.md'
|
||||
- '!api/CHANGELOG.md'
|
||||
pull_request:
|
||||
branches:
|
||||
- 'master'
|
||||
- 'v5.*'
|
||||
paths:
|
||||
- '.github/workflows/api-pull-request.yml'
|
||||
- 'api/**'
|
||||
- '!api/docs/**'
|
||||
- '!api/README.md'
|
||||
- '!api/CHANGELOG.md'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
POSTGRES_HOST: localhost
|
||||
POSTGRES_PORT: 5432
|
||||
POSTGRES_ADMIN_USER: prowler
|
||||
POSTGRES_ADMIN_PASSWORD: S3cret
|
||||
POSTGRES_USER: prowler_user
|
||||
POSTGRES_PASSWORD: prowler
|
||||
POSTGRES_DB: postgres-db
|
||||
VALKEY_HOST: localhost
|
||||
VALKEY_PORT: 6379
|
||||
VALKEY_DB: 0
|
||||
API_WORKING_DIR: ./api
|
||||
IMAGE_NAME: prowler-api
|
||||
|
||||
jobs:
|
||||
code-quality:
|
||||
if: github.repository == 'prowler-cloud/prowler'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
permissions:
|
||||
contents: read
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- '3.12'
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ./api
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Setup Python with Poetry
|
||||
uses: ./.github/actions/setup-python-poetry
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
working-directory: ./api
|
||||
|
||||
- name: Poetry check
|
||||
run: poetry check --lock
|
||||
|
||||
- name: Ruff lint
|
||||
run: poetry run ruff check . --exclude contrib
|
||||
|
||||
- name: Ruff format
|
||||
run: poetry run ruff format --check . --exclude contrib
|
||||
|
||||
- name: Pylint
|
||||
run: poetry run pylint --disable=W,C,R,E -j 0 -rn -sn src/
|
||||
|
||||
security-scans:
|
||||
if: github.repository == 'prowler-cloud/prowler'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
permissions:
|
||||
contents: read
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- '3.12'
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ./api
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Setup Python with Poetry
|
||||
uses: ./.github/actions/setup-python-poetry
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
working-directory: ./api
|
||||
|
||||
- name: Bandit
|
||||
run: poetry run bandit -q -lll -x '*_test.py,./contrib/' -r .
|
||||
|
||||
- name: Safety
|
||||
# 76352, 76353, 77323 come from SDK, but they cannot upgrade it yet. It does not affect API
|
||||
# TODO: Botocore needs urllib3 1.X so we need to ignore these vulnerabilities 77744,77745. Remove this once we upgrade to urllib3 2.X
|
||||
run: poetry run safety check --ignore 70612,66963,74429,76352,76353,77323,77744,77745
|
||||
|
||||
- name: Vulture
|
||||
run: poetry run vulture --exclude "contrib,tests,conftest.py" --min-confidence 100 .
|
||||
|
||||
tests:
|
||||
if: github.repository == 'prowler-cloud/prowler'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
permissions:
|
||||
contents: read
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- '3.12'
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ./api
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres
|
||||
env:
|
||||
POSTGRES_HOST: ${{ env.POSTGRES_HOST }}
|
||||
POSTGRES_PORT: ${{ env.POSTGRES_PORT }}
|
||||
POSTGRES_USER: ${{ env.POSTGRES_USER }}
|
||||
POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }}
|
||||
POSTGRES_DB: ${{ env.POSTGRES_DB }}
|
||||
ports:
|
||||
- 5432:5432
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
valkey:
|
||||
image: valkey/valkey:7-alpine3.19
|
||||
env:
|
||||
VALKEY_HOST: ${{ env.VALKEY_HOST }}
|
||||
VALKEY_PORT: ${{ env.VALKEY_PORT }}
|
||||
VALKEY_DB: ${{ env.VALKEY_DB }}
|
||||
ports:
|
||||
- 6379:6379
|
||||
options: >-
|
||||
--health-cmd "valkey-cli ping"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Setup Python with Poetry
        uses: ./.github/actions/setup-python-poetry
        with:
          python-version: ${{ matrix.python-version }}
          working-directory: ./api

      - name: Run tests with pytest
        run: poetry run pytest --cov=./src/backend --cov-report=xml src/backend

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: api

  dockerfile-lint:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Lint Dockerfile with Hadolint
        uses: hadolint/hadolint-action@2332a7b74a6de0dda2e2221d575162eba76ba5e5 # v3.3.0
        with:
          dockerfile: api/Dockerfile
          ignore: DL3013

  container-build-and-scan:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
      security-events: write
      pull-requests: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build container
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.API_WORKING_DIR }}
          push: false
          load: true
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Scan container with Trivy
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
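      # Judging by the input names of the local trivy-scan action, setting
      # fail-on-critical to 'false' makes this scan informational: CRITICAL
      # findings are reported without failing the workflow.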
@@ -0,0 +1,72 @@
name: 'API: Security'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-security.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-security.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  API_WORKING_DIR: ./api

jobs:
  api-security-scans:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: read
    strategy:
      matrix:
        python-version:
          - '3.12'
    defaults:
      run:
        working-directory: ./api

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for API changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            api/docs/**
            api/README.md
            api/CHANGELOG.md
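      # Every step below is gated on any_changed, so pull requests touching
      # only the ignored docs/changelog paths skip the security scans entirely.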
      - name: Setup Python with Poetry
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/setup-python-poetry
        with:
          python-version: ${{ matrix.python-version }}
          working-directory: ./api

      - name: Bandit
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run bandit -q -lll -x '*_test.py,./contrib/' -r .

      - name: Safety
        if: steps.check-changes.outputs.any_changed == 'true'
        # 76352, 76353 and 77323 come from the SDK, which cannot be upgraded yet; they do not affect the API
        # TODO: Botocore requires urllib3 1.X, so vulnerabilities 77744 and 77745 must be ignored. Remove this once we upgrade to urllib3 2.X
        run: poetry run safety check --ignore 70612,66963,74429,76352,76353,77323,77744,77745

      - name: Vulture
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run vulture --exclude "contrib,tests,conftest.py" --min-confidence 100 .
@@ -0,0 +1,110 @@
name: 'API: Tests'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-tests.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'api/**'
      - '.github/workflows/api-tests.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  POSTGRES_HOST: localhost
  POSTGRES_PORT: 5432
  POSTGRES_ADMIN_USER: prowler
  POSTGRES_ADMIN_PASSWORD: S3cret
  POSTGRES_USER: prowler_user
  POSTGRES_PASSWORD: prowler
  POSTGRES_DB: postgres-db
  VALKEY_HOST: localhost
  VALKEY_PORT: 6379
  VALKEY_DB: 0
  API_WORKING_DIR: ./api
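  # Presumably throwaway CI-only credentials: the same variables feed both the
  # service containers below and the API test settings.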

jobs:
  api-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
    strategy:
      matrix:
        python-version:
          - '3.12'
    defaults:
      run:
        working-directory: ./api

    services:
      postgres:
        image: postgres
        env:
          POSTGRES_HOST: ${{ env.POSTGRES_HOST }}
          POSTGRES_PORT: ${{ env.POSTGRES_PORT }}
          POSTGRES_USER: ${{ env.POSTGRES_USER }}
          POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }}
          POSTGRES_DB: ${{ env.POSTGRES_DB }}
        ports:
          - 5432:5432
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
      valkey:
        image: valkey/valkey:7-alpine3.19
        env:
          VALKEY_HOST: ${{ env.VALKEY_HOST }}
          VALKEY_PORT: ${{ env.VALKEY_PORT }}
          VALKEY_DB: ${{ env.VALKEY_DB }}
        ports:
          - 6379:6379
        options: >-
          --health-cmd "valkey-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for API changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            api/docs/**
            api/README.md
            api/CHANGELOG.md

      - name: Setup Python with Poetry
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/setup-python-poetry
        with:
          python-version: ${{ matrix.python-version }}
          working-directory: ./api

      - name: Run tests with pytest
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run pytest --cov=./src/backend --cov-report=xml src/backend

      - name: Upload coverage reports to Codecov
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: api
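      # The 'api' flag buckets this upload in Codecov, so API coverage can be
      # tracked separately from the coverage reported by other components.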
@@ -7,8 +7,6 @@ on:
    types:
      - 'labeled'
      - 'closed'
    paths:
      - '.github/workflows/backport.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
@@ -1,34 +1,43 @@
name: Label Community Contributors PRs
name: Community PR labelling

on:
  pull_request:
    types: [opened]
  # We need "write" permissions on the PR to be able to add a label.
  pull_request_target: # We need this to have labelling permissions. There are no user inputs here, so we should be fine.
    types:
      - opened

permissions: {}

jobs:
  add-community-label:
  label-if-community:
    name: Add 'community' label if the PR is from a community contributor
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write

    steps:
      - name: Label community contributors
      - name: Check if author is org member
        id: check_membership
        env:
          GH_TOKEN: ${{ github.token }}
          AUTHOR: ${{ github.event.pull_request.user.login }}
          ORG: ${{ github.repository_owner }}
        run: |
          # Fetch fresh PR data to get current author_association
          ASSOCIATION=$(gh api /repos/${{ github.repository }}/pulls/${{ github.event.number }} --jq '.author_association')
          AUTHOR=$(gh api /repos/${{ github.repository }}/pulls/${{ github.event.number }} --jq '.user.login')

          echo "Author: $AUTHOR, Association: $ASSOCIATION"

          # Members have associations like: OWNER, MEMBER, COLLABORATOR
          # Non-members have: CONTRIBUTOR, FIRST_TIME_CONTRIBUTOR, FIRST_TIMER, NONE
          if [[ "$ASSOCIATION" != "OWNER" && "$ASSOCIATION" != "MEMBER" && "$ASSOCIATION" != "COLLABORATOR" ]]; then
            gh api /repos/${{ github.repository }}/issues/${{ github.event.number }}/labels \
              -X POST \
              -f labels[]='community'
            echo "Added 'community' label for $ASSOCIATION contributor"
          echo "Checking if $AUTHOR is a member of $ORG"
          if gh api --method GET "orgs/$ORG/members/$AUTHOR" >/dev/null 2>&1; then
            echo "is_member=true" >> $GITHUB_OUTPUT
            echo "$AUTHOR is an organization member"
          else
            echo "Skipped labeling for $ASSOCIATION"
            echo "is_member=false" >> $GITHUB_OUTPUT
            echo "$AUTHOR is not an organization member"
          fi
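      # For reference: GET orgs/$ORG/members/$AUTHOR answers with a success
      # status (HTTP 204) when the user is a visible member of the organization
      # and fails otherwise, so the exit status of the gh call above is what
      # drives is_member.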
      - name: Add community label
        if: steps.check_membership.outputs.is_member == 'false'
        env:
          PR_NUMBER: ${{ github.event.pull_request.number }}
          GH_TOKEN: ${{ github.token }}
        run: |
          echo "Adding 'community' label to PR #$PR_NUMBER"
          gh pr edit "$PR_NUMBER" --add-label community
@@ -16,7 +16,7 @@ permissions:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
  cancel-in-progress: false
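  # With cancel-in-progress set to false, a new run queues behind the one in
  # flight instead of cancelling it, the safer choice for a workflow that
  # publishes artifacts.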

env:
  # Tags
@@ -80,8 +80,23 @@ jobs:
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: MCP
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Build and push MCP container (release)
        if: github.event_name == 'release'
        id: container-push
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
@@ -100,8 +115,31 @@ jobs:
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: MCP
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  trigger-deployment:
    if: github.event_name == 'push'
    needs: [setup, container-build-push]
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read

    steps:
      - name: Trigger MCP deployment
        if: github.event_name == 'push'
        uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
@@ -1,4 +1,4 @@
name: 'MCP: Pull Request'
name: 'MCP: Container Checks'

on:
  push:
@@ -6,19 +6,15 @@ on:
      - 'master'
      - 'v5.*'
    paths:
      - '.github/workflows/mcp-pull-request.yml'
      - 'mcp_server/**'
      - '!mcp_server/README.md'
      - '!mcp_server/CHANGELOG.md'
      - '.github/workflows/mcp-container-checks.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - '.github/workflows/mcp-pull-request.yml'
      - 'mcp_server/**'
      - '!mcp_server/README.md'
      - '!mcp_server/CHANGELOG.md'
      - '.github/workflows/mcp-container-checks.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -29,8 +25,7 @@ env:
  IMAGE_NAME: prowler-mcp

jobs:
  dockerfile-lint:
    if: github.repository == 'prowler-cloud/prowler'
  mcp-dockerfile-lint:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
@@ -40,13 +35,19 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check if Dockerfile changed
        id: dockerfile-changed
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: mcp_server/Dockerfile

      - name: Lint Dockerfile with Hadolint
        if: steps.dockerfile-changed.outputs.any_changed == 'true'
        uses: hadolint/hadolint-action@2332a7b74a6de0dda2e2221d575162eba76ba5e5 # v3.3.0
        with:
          dockerfile: mcp_server/Dockerfile

  container-build-and-scan:
    if: github.repository == 'prowler-cloud/prowler'
  mcp-container-build-and-scan:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
@@ -58,10 +59,20 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for MCP changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            mcp_server/README.md
            mcp_server/CHANGELOG.md

      - name: Set up Docker Buildx
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build MCP container
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.MCP_WORKING_DIR }}
@@ -72,6 +83,7 @@ jobs:
          cache-to: type=gha,mode=max

      - name: Scan MCP container with Trivy
        if: github.repository == 'prowler-cloud/prowler' && steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
@@ -1,202 +0,0 @@
name: SDK - Build and Push containers

on:
  push:
    branches:
      # For `v3-latest`
      - "v3"
      # For `v4-latest`
      - "v4.6"
      # For `latest`
      - "master"
    paths-ignore:
      - ".github/**"
      - "README.md"
      - "docs/**"
      - "ui/**"
      - "api/**"

  release:
    types: [published]

env:
  # AWS Configuration
  AWS_REGION_STG: eu-west-1
  AWS_REGION_PLATFORM: eu-west-1
  AWS_REGION: us-east-1

  # Container's configuration
  IMAGE_NAME: prowler
  DOCKERFILE_PATH: ./Dockerfile

  # Tags
  LATEST_TAG: latest
  STABLE_TAG: stable
  # The RELEASE_TAG is set during runtime in releases
  RELEASE_TAG: ""
  # The PROWLER_VERSION and PROWLER_VERSION_MAJOR are set during runtime in releases
  PROWLER_VERSION: ""
  PROWLER_VERSION_MAJOR: ""
  # TEMPORARY_TAG: temporary

  # Python configuration
  PYTHON_VERSION: 3.12

  # Container Registries
  PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
  PROWLERCLOUD_DOCKERHUB_IMAGE: prowler

jobs:
  # Build Prowler OSS container
  container-build-push:
    # needs: dockerfile-linter
    runs-on: ubuntu-latest
    outputs:
      prowler_version_major: ${{ steps.get-prowler-version.outputs.PROWLER_VERSION_MAJOR }}
      prowler_version: ${{ steps.get-prowler-version.outputs.PROWLER_VERSION }}
    env:
      POETRY_VIRTUALENVS_CREATE: "false"

    steps:
      - name: Checkout
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Setup Python
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install Poetry
        run: |
          pipx install poetry==2.*
          pipx inject poetry poetry-bumpversion

      - name: Get Prowler version
        id: get-prowler-version
        run: |
          PROWLER_VERSION="$(poetry version -s 2>/dev/null)"
          echo "PROWLER_VERSION=${PROWLER_VERSION}" >> "${GITHUB_ENV}"
          echo "PROWLER_VERSION=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"

          # Store prowler version major just for the release
          PROWLER_VERSION_MAJOR="${PROWLER_VERSION%%.*}"
          echo "PROWLER_VERSION_MAJOR=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_ENV}"
          echo "PROWLER_VERSION_MAJOR=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_OUTPUT}"

          case ${PROWLER_VERSION_MAJOR} in
            3)
              echo "LATEST_TAG=v3-latest" >> "${GITHUB_ENV}"
              echo "STABLE_TAG=v3-stable" >> "${GITHUB_ENV}"
              ;;

            4)
              echo "LATEST_TAG=v4-latest" >> "${GITHUB_ENV}"
              echo "STABLE_TAG=v4-stable" >> "${GITHUB_ENV}"
              ;;

            5)
              echo "LATEST_TAG=latest" >> "${GITHUB_ENV}"
              echo "STABLE_TAG=stable" >> "${GITHUB_ENV}"
              ;;

            *)
              # Fallback if any other version is present
              echo "Releasing another Prowler major version, aborting..."
              exit 1
              ;;
          esac

      - name: Login to DockerHub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to Public ECR
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: public.ecr.aws
          username: ${{ secrets.PUBLIC_ECR_AWS_ACCESS_KEY_ID }}
          password: ${{ secrets.PUBLIC_ECR_AWS_SECRET_ACCESS_KEY }}
        env:
          AWS_REGION: ${{ env.AWS_REGION }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push container image (latest)
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          push: true
          tags: |
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
          file: ${{ env.DOCKERFILE_PATH }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Build and push container image (release)
        if: github.event_name == 'release'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          # Use local context to get changes
          # https://github.com/docker/build-push-action#path-context
          context: .
          push: true
          tags: |
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.PROWLER_VERSION }}
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.PROWLER_VERSION }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.PROWLER_VERSION }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          file: ${{ env.DOCKERFILE_PATH }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      # - name: Push README to Docker Hub (toniblyx)
      #   uses: peter-evans/dockerhub-description@432a30c9e07499fd01da9f8a49f0faf9e0ca5b77 # v4.0.2
      #   with:
      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
      #     repository: ${{ env.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}
      #     readme-filepath: ./README.md
      #
      # - name: Push README to Docker Hub (prowlercloud)
      #   uses: peter-evans/dockerhub-description@432a30c9e07499fd01da9f8a49f0faf9e0ca5b77 # v4.0.2
      #   with:
      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
      #     repository: ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}
      #     readme-filepath: ./README.md

  dispatch-action:
    needs: container-build-push
    runs-on: ubuntu-latest
    steps:
      - name: Get latest commit info (latest)
        if: github.event_name == 'push'
        run: |
          LATEST_COMMIT_HASH=$(echo ${{ github.event.after }} | cut -b -7)
          echo "LATEST_COMMIT_HASH=${LATEST_COMMIT_HASH}" >> $GITHUB_ENV

      - name: Dispatch event (latest)
        if: github.event_name == 'push' && needs.container-build-push.outputs.prowler_version_major == '3'
        run: |
          curl https://api.github.com/repos/${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}/dispatches \
            -H "Accept: application/vnd.github+json" \
            -H "Authorization: Bearer ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            --data '{"event_type":"dispatch","client_payload":{"version":"v3-latest", "tag": "${{ env.LATEST_COMMIT_HASH }}"}}'

      - name: Dispatch event (release)
        if: github.event_name == 'release' && needs.container-build-push.outputs.prowler_version_major == '3'
        run: |
          curl https://api.github.com/repos/${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}/dispatches \
            -H "Accept: application/vnd.github+json" \
            -H "Authorization: Bearer ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            --data '{"event_type":"dispatch","client_payload":{"version":"release", "tag":"${{ needs.container-build-push.outputs.prowler_version }}"}}'
@@ -1,146 +1,218 @@
name: SDK - Bump Version
name: 'SDK: Bump Version'

on:
  release:
    types: [published]
    types:
      - 'published'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.release.tag_name }}
  cancel-in-progress: false

env:
  PROWLER_VERSION: ${{ github.event.release.tag_name }}
  BASE_BRANCH: master

jobs:
  bump-version:
    name: Bump Version
  detect-release-type:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read
    outputs:
      is_minor: ${{ steps.detect.outputs.is_minor }}
      is_patch: ${{ steps.detect.outputs.is_patch }}
      major_version: ${{ steps.detect.outputs.major_version }}
      minor_version: ${{ steps.detect.outputs.minor_version }}
      patch_version: ${{ steps.detect.outputs.patch_version }}
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Get Prowler version
        shell: bash
      - name: Detect release type and parse version
        id: detect
        run: |
          if [[ $PROWLER_VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
            MAJOR_VERSION=${BASH_REMATCH[1]}
            MINOR_VERSION=${BASH_REMATCH[2]}
            FIX_VERSION=${BASH_REMATCH[3]}
            PATCH_VERSION=${BASH_REMATCH[3]}

            # Export version components to GitHub environment
            echo "MAJOR_VERSION=${MAJOR_VERSION}" >> "${GITHUB_ENV}"
            echo "MINOR_VERSION=${MINOR_VERSION}" >> "${GITHUB_ENV}"
            echo "FIX_VERSION=${FIX_VERSION}" >> "${GITHUB_ENV}"
            echo "major_version=${MAJOR_VERSION}" >> "${GITHUB_OUTPUT}"
            echo "minor_version=${MINOR_VERSION}" >> "${GITHUB_OUTPUT}"
            echo "patch_version=${PATCH_VERSION}" >> "${GITHUB_OUTPUT}"

            if (( MAJOR_VERSION == 5 )); then
              if (( FIX_VERSION == 0 )); then
                echo "Minor Release: $PROWLER_VERSION"
            if (( MAJOR_VERSION != 5 )); then
              echo "::error::Releasing another Prowler major version, aborting..."
              exit 1
            fi

                # Set up next minor version for master
                BUMP_VERSION_TO=${MAJOR_VERSION}.$((MINOR_VERSION + 1)).${FIX_VERSION}
                echo "BUMP_VERSION_TO=${BUMP_VERSION_TO}" >> "${GITHUB_ENV}"

                TARGET_BRANCH=${BASE_BRANCH}
                echo "TARGET_BRANCH=${TARGET_BRANCH}" >> "${GITHUB_ENV}"

                # Set up patch version for version branch
                PATCH_VERSION_TO=${MAJOR_VERSION}.${MINOR_VERSION}.1
                echo "PATCH_VERSION_TO=${PATCH_VERSION_TO}" >> "${GITHUB_ENV}"

                VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
                echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"

                echo "Bumping to next minor version: ${BUMP_VERSION_TO} in branch ${TARGET_BRANCH}"
                echo "Bumping to next patch version: ${PATCH_VERSION_TO} in branch ${VERSION_BRANCH}"
              else
                echo "Patch Release: $PROWLER_VERSION"

                BUMP_VERSION_TO=${MAJOR_VERSION}.${MINOR_VERSION}.$((FIX_VERSION + 1))
                echo "BUMP_VERSION_TO=${BUMP_VERSION_TO}" >> "${GITHUB_ENV}"

                TARGET_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
                echo "TARGET_BRANCH=${TARGET_BRANCH}" >> "${GITHUB_ENV}"

                echo "Bumping to next patch version: ${BUMP_VERSION_TO} in branch ${TARGET_BRANCH}"
              fi
            if (( PATCH_VERSION == 0 )); then
              echo "is_minor=true" >> "${GITHUB_OUTPUT}"
              echo "is_patch=false" >> "${GITHUB_OUTPUT}"
              echo "✓ Minor release detected: $PROWLER_VERSION"
            else
              echo "Releasing another Prowler major version, aborting..."
              exit 1
              echo "is_minor=false" >> "${GITHUB_OUTPUT}"
              echo "is_patch=true" >> "${GITHUB_OUTPUT}"
              echo "✓ Patch release detected: $PROWLER_VERSION"
            fi
          else
            echo "Invalid version syntax: '$PROWLER_VERSION' (must be N.N.N)" >&2
            echo "::error::Invalid version syntax: '$PROWLER_VERSION' (must be X.Y.Z)"
            exit 1
          fi
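      # Release-type convention derived above: an X.Y.0 tag is a minor release
      # (master moves to X.(Y+1).0 and the vX.Y branch gets X.Y.1), while any
      # other X.Y.Z tag is a patch release bumped on its vX.Y branch.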
      - name: Bump versions in files
  bump-minor-version:
    needs: detect-release-type
    if: needs.detect-release-type.outputs.is_minor == 'true'
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Calculate next minor version
        run: |
          echo "Using PROWLER_VERSION=$PROWLER_VERSION"
          echo "Using BUMP_VERSION_TO=$BUMP_VERSION_TO"
          MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
          MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}

          set -e
          NEXT_MINOR_VERSION=${MAJOR_VERSION}.$((MINOR_VERSION + 1)).0
          echo "NEXT_MINOR_VERSION=${NEXT_MINOR_VERSION}" >> "${GITHUB_ENV}"

          echo "Bumping version in pyproject.toml ..."
          sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${BUMP_VERSION_TO}\"|" pyproject.toml
          echo "Current version: $PROWLER_VERSION"
          echo "Next minor version: $NEXT_MINOR_VERSION"

          echo "Bumping version in prowler/config/config.py ..."
          sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${BUMP_VERSION_TO}\"|" prowler/config/config.py
      - name: Bump versions in files for master
        run: |
          set -e

          echo "Bumping version in .env ..."
          sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${BUMP_VERSION_TO}|" .env
          sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${NEXT_MINOR_VERSION}\"|" pyproject.toml
          sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${NEXT_MINOR_VERSION}\"|" prowler/config/config.py
          sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_MINOR_VERSION}|" .env

          git --no-pager diff
          echo "Files modified:"
          git --no-pager diff

      - name: Create Pull Request
      - name: Create PR for next minor version to master
        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          base: ${{ env.TARGET_BRANCH }}
          commit-message: "chore(release): Bump version to v${{ env.BUMP_VERSION_TO }}"
          branch: "version-bump-to-v${{ env.BUMP_VERSION_TO }}"
          title: "chore(release): Bump version to v${{ env.BUMP_VERSION_TO }}"
          labels: no-changelog
          body: |
            ### Description
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          base: master
          commit-message: 'chore(release): Bump version to v${{ env.NEXT_MINOR_VERSION }}'
          branch: version-bump-to-v${{ env.NEXT_MINOR_VERSION }}
          title: 'chore(release): Bump version to v${{ env.NEXT_MINOR_VERSION }}'
          labels: no-changelog
          body: |
            ### Description

            Bump Prowler version to v${{ env.BUMP_VERSION_TO }}
            Bump Prowler version to v${{ env.NEXT_MINOR_VERSION }} after releasing v${{ env.PROWLER_VERSION }}.

            ### License
            ### License

            By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
            By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.

      - name: Handle patch version for minor release
        if: env.FIX_VERSION == '0'
      - name: Checkout version branch
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          ref: v${{ needs.detect-release-type.outputs.major_version }}.${{ needs.detect-release-type.outputs.minor_version }}

      - name: Calculate first patch version
        run: |
          echo "Using PROWLER_VERSION=$PROWLER_VERSION"
          echo "Using PATCH_VERSION_TO=$PATCH_VERSION_TO"
          MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
          MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}

          set -e
          FIRST_PATCH_VERSION=${MAJOR_VERSION}.${MINOR_VERSION}.1
          VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}

          echo "Bumping version in pyproject.toml ..."
          sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${PATCH_VERSION_TO}\"|" pyproject.toml
          echo "FIRST_PATCH_VERSION=${FIRST_PATCH_VERSION}" >> "${GITHUB_ENV}"
          echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"

          echo "Bumping version in prowler/config/config.py ..."
          sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${PATCH_VERSION_TO}\"|" prowler/config/config.py
          echo "First patch version: $FIRST_PATCH_VERSION"
          echo "Version branch: $VERSION_BRANCH"

          echo "Bumping version in .env ..."
          sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PATCH_VERSION_TO}|" .env
      - name: Bump versions in files for version branch
        run: |
          set -e

          git --no-pager diff
          sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${FIRST_PATCH_VERSION}\"|" pyproject.toml
          sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${FIRST_PATCH_VERSION}\"|" prowler/config/config.py
          sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${FIRST_PATCH_VERSION}|" .env

      - name: Create Pull Request for patch version
        if: env.FIX_VERSION == '0'
          echo "Files modified:"
          git --no-pager diff

      - name: Create PR for first patch version to version branch
        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          base: ${{ env.VERSION_BRANCH }}
          commit-message: "chore(release): Bump version to v${{ env.PATCH_VERSION_TO }}"
          branch: "version-bump-to-v${{ env.PATCH_VERSION_TO }}"
          title: "chore(release): Bump version to v${{ env.PATCH_VERSION_TO }}"
          labels: no-changelog
          body: |
            ### Description
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          base: ${{ env.VERSION_BRANCH }}
          commit-message: 'chore(release): Bump version to v${{ env.FIRST_PATCH_VERSION }}'
          branch: version-bump-to-v${{ env.FIRST_PATCH_VERSION }}
          title: 'chore(release): Bump version to v${{ env.FIRST_PATCH_VERSION }}'
          labels: no-changelog
          body: |
            ### Description

            Bump Prowler version to v${{ env.PATCH_VERSION_TO }}
            Bump Prowler version to v${{ env.FIRST_PATCH_VERSION }} in version branch after releasing v${{ env.PROWLER_VERSION }}.

            ### License
            ### License

            By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
            By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.

  bump-patch-version:
    needs: detect-release-type
    if: needs.detect-release-type.outputs.is_patch == 'true'
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Calculate next patch version
        run: |
          MAJOR_VERSION=${{ needs.detect-release-type.outputs.major_version }}
          MINOR_VERSION=${{ needs.detect-release-type.outputs.minor_version }}
          PATCH_VERSION=${{ needs.detect-release-type.outputs.patch_version }}

          NEXT_PATCH_VERSION=${MAJOR_VERSION}.${MINOR_VERSION}.$((PATCH_VERSION + 1))
          VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}

          echo "NEXT_PATCH_VERSION=${NEXT_PATCH_VERSION}" >> "${GITHUB_ENV}"
          echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"

          echo "Current version: $PROWLER_VERSION"
          echo "Next patch version: $NEXT_PATCH_VERSION"
          echo "Target branch: $VERSION_BRANCH"

      - name: Bump versions in files for version branch
        run: |
          set -e

          sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${NEXT_PATCH_VERSION}\"|" pyproject.toml
          sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${NEXT_PATCH_VERSION}\"|" prowler/config/config.py
          sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${NEXT_PATCH_VERSION}|" .env

          echo "Files modified:"
          git --no-pager diff

      - name: Create PR for next patch version to version branch
        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          base: ${{ env.VERSION_BRANCH }}
          commit-message: 'chore(release): Bump version to v${{ env.NEXT_PATCH_VERSION }}'
          branch: version-bump-to-v${{ env.NEXT_PATCH_VERSION }}
          title: 'chore(release): Bump version to v${{ env.NEXT_PATCH_VERSION }}'
          labels: no-changelog
          body: |
            ### Description

            Bump Prowler version to v${{ env.NEXT_PATCH_VERSION }} after releasing v${{ env.PROWLER_VERSION }}.

            ### License

            By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
@@ -0,0 +1,100 @@
name: 'SDK: Code Quality'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-code-quality.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-code-quality.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  sdk-code-quality:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 20
    permissions:
      contents: read
    strategy:
      matrix:
        python-version:
          - '3.9'
          - '3.10'
          - '3.11'
          - '3.12'

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for SDK changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            prowler/CHANGELOG.md
            docs/**
            permissions/**
            api/**
            ui/**
            dashboard/**
            mcp_server/**
            README.md
            mkdocs.yml
            .backportrc.json
            .env
            docker-compose*
            examples/**
            .gitignore
            contrib/**

      - name: Install Poetry
        if: steps.check-changes.outputs.any_changed == 'true'
        run: pipx install poetry==2.1.1

      - name: Set up Python ${{ matrix.python-version }}
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'poetry'

      - name: Install dependencies
        if: steps.check-changes.outputs.any_changed == 'true'
        run: |
          poetry install --no-root
          poetry run pip list

      - name: Check Poetry lock file
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry check --lock
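      # flake8 codes ignored below, for reference: E266 (too many leading '#'),
      # W503 (line break before binary operator), E203 (whitespace before ':'),
      # E501 (line too long), W605 (invalid escape sequence) and E128
      # (continuation line under-indented for visual indent).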
      - name: Lint with flake8
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run flake8 . --ignore=E266,W503,E203,E501,W605,E128 --exclude contrib,ui,api

      - name: Check format with black
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run black --exclude api ui --check .

      - name: Lint with pylint
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run pylint --disable=W,C,R,E -j 0 -rn -sn prowler/
@@ -31,7 +31,8 @@ concurrency:
  cancel-in-progress: true

jobs:
  analyze:
  sdk-analyze:
    if: github.repository == 'prowler-cloud/prowler'
    name: CodeQL Security Analysis
    runs-on: ubuntu-latest
    timeout-minutes: 30
@@ -0,0 +1,217 @@
name: 'SDK: Container Build and Push'

on:
  push:
    branches:
      - 'v3' # For v3-latest
      - 'v4.6' # For v4-latest
      - 'master' # For latest
    paths-ignore:
      - '.github/**'
      - '!.github/workflows/sdk-container-build-push.yml'
      - 'README.md'
      - 'docs/**'
      - 'ui/**'
      - 'api/**'
  release:
    types:
      - 'published'

permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false

env:
  # Container configuration
  IMAGE_NAME: prowler
  DOCKERFILE_PATH: ./Dockerfile

  # Python configuration
  PYTHON_VERSION: '3.12'

  # Tags (dynamically set based on version)
  LATEST_TAG: latest
  STABLE_TAG: stable

  # Container registries
  PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
  PROWLERCLOUD_DOCKERHUB_IMAGE: prowler

  # AWS configuration (for ECR)
  AWS_REGION: us-east-1

jobs:
  container-build-push:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 45
    permissions:
      contents: read
      packages: write
    outputs:
      prowler_version: ${{ steps.get-prowler-version.outputs.prowler_version }}
      prowler_version_major: ${{ steps.get-prowler-version.outputs.prowler_version_major }}
    env:
      POETRY_VIRTUALENVS_CREATE: 'false'

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install Poetry
        run: |
          pipx install poetry==2.1.1
          pipx inject poetry poetry-bumpversion

      - name: Get Prowler version and set tags
        id: get-prowler-version
        run: |
          PROWLER_VERSION="$(poetry version -s 2>/dev/null)"
          echo "prowler_version=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"
          echo "PROWLER_VERSION=${PROWLER_VERSION}" >> "${GITHUB_ENV}"

          # Extract major version
          PROWLER_VERSION_MAJOR="${PROWLER_VERSION%%.*}"
          echo "prowler_version_major=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_OUTPUT}"
          echo "PROWLER_VERSION_MAJOR=${PROWLER_VERSION_MAJOR}" >> "${GITHUB_ENV}"
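          # For reference: ${PROWLER_VERSION%%.*} strips the longest '.*'
          # suffix, so PROWLER_VERSION=5.12.1 yields PROWLER_VERSION_MAJOR=5.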
          # Set version-specific tags
          case ${PROWLER_VERSION_MAJOR} in
            3)
              echo "LATEST_TAG=v3-latest" >> "${GITHUB_ENV}"
              echo "STABLE_TAG=v3-stable" >> "${GITHUB_ENV}"
              echo "✓ Prowler v3 detected - tags: v3-latest, v3-stable"
              ;;
            4)
              echo "LATEST_TAG=v4-latest" >> "${GITHUB_ENV}"
              echo "STABLE_TAG=v4-stable" >> "${GITHUB_ENV}"
              echo "✓ Prowler v4 detected - tags: v4-latest, v4-stable"
              ;;
            5)
              echo "LATEST_TAG=latest" >> "${GITHUB_ENV}"
              echo "STABLE_TAG=stable" >> "${GITHUB_ENV}"
              echo "✓ Prowler v5 detected - tags: latest, stable"
              ;;
            *)
              echo "::error::Unsupported Prowler major version: ${PROWLER_VERSION_MAJOR}"
              exit 1
              ;;
          esac

      - name: Login to DockerHub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to Public ECR
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          registry: public.ecr.aws
          username: ${{ secrets.PUBLIC_ECR_AWS_ACCESS_KEY_ID }}
          password: ${{ secrets.PUBLIC_ECR_AWS_SECRET_ACCESS_KEY }}
        env:
          AWS_REGION: ${{ env.AWS_REGION }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push SDK container (latest)
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: .
          file: ${{ env.DOCKERFILE_PATH }}
          push: true
          tags: |
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: SDK
          RELEASE_TAG: ${{ env.PROWLER_VERSION }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Build and push SDK container (release)
        if: github.event_name == 'release'
        id: container-push
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: .
          file: ${{ env.DOCKERFILE_PATH }}
          push: true
          tags: |
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.PROWLER_VERSION }}
            ${{ secrets.DOCKER_HUB_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.PROWLER_VERSION }}
            ${{ secrets.PUBLIC_ECR_REPOSITORY }}/${{ env.IMAGE_NAME }}:${{ env.STABLE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.PROWLER_VERSION }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: SDK
          RELEASE_TAG: ${{ env.PROWLER_VERSION }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  dispatch-v3-deployment:
    if: needs.container-build-push.outputs.prowler_version_major == '3'
    needs: container-build-push
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read

    steps:
      - name: Calculate short SHA
        id: short-sha
        run: echo "short_sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
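        # ${GITHUB_SHA::7} is bash substring expansion: the first seven
        # characters of the commit SHA, the same short hash the old workflow
        # produced with `cut -b -7`.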

      - name: Dispatch v3 deployment (latest)
        if: github.event_name == 'push'
        uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
          event-type: dispatch
          client-payload: '{"version":"v3-latest","tag":"${{ steps.short-sha.outputs.short_sha }}"}'
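      # peter-evans/repository-dispatch wraps the same POST to
      # /repos/{owner}/{repo}/dispatches that the old workflow issued with
      # curl; the client payload shape is unchanged.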
      - name: Dispatch v3 deployment (release)
        if: github.event_name == 'release'
        uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.DISPATCH_OWNER }}/${{ secrets.DISPATCH_REPO }}
          event-type: dispatch
          client-payload: '{"version":"release","tag":"${{ needs.container-build-push.outputs.prowler_version }}"}'
@@ -0,0 +1,111 @@
name: 'SDK: Container Checks'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'Dockerfile'
      - '.github/workflows/sdk-container-checks.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'Dockerfile'
      - '.github/workflows/sdk-container-checks.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  IMAGE_NAME: prowler

jobs:
  sdk-dockerfile-lint:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 10
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check if Dockerfile changed
        id: dockerfile-changed
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: Dockerfile

      - name: Lint Dockerfile with Hadolint
        if: steps.dockerfile-changed.outputs.any_changed == 'true'
        uses: hadolint/hadolint-action@2332a7b74a6de0dda2e2221d575162eba76ba5e5 # v3.3.0
        with:
          dockerfile: Dockerfile
          ignore: DL3013

  sdk-container-build-and-scan:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
      security-events: write
      pull-requests: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for SDK changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            prowler/CHANGELOG.md
            docs/**
            permissions/**
            api/**
            ui/**
            dashboard/**
            mcp_server/**
            README.md
            mkdocs.yml
            .backportrc.json
            .env
            docker-compose*
            examples/**
            .gitignore
            contrib/**

      - name: Set up Docker Buildx
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build SDK container
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: .
          push: false
          load: true
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Scan SDK container with Trivy
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
@@ -1,286 +0,0 @@
|
||||
name: SDK - Pull Request
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "master"
|
||||
- "v3"
|
||||
- "v4.*"
|
||||
- "v5.*"
|
||||
pull_request:
|
||||
branches:
|
||||
- "master"
|
||||
- "v3"
|
||||
- "v4.*"
|
||||
- "v5.*"
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Test if changes are in not ignored paths
|
||||
id: are-non-ignored-files-changed
|
||||
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
|
||||
with:
|
||||
files: ./**
|
||||
files_ignore: |
|
||||
.github/**
|
||||
docs/**
|
||||
permissions/**
|
||||
api/**
|
||||
ui/**
|
||||
prowler/CHANGELOG.md
|
||||
README.md
|
||||
mkdocs.yml
|
||||
.backportrc.json
|
||||
.env
|
||||
docker-compose*
|
||||
examples/**
|
||||
.gitignore
|
||||
|
||||
- name: Install poetry
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pipx install poetry==2.1.1
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
cache: "poetry"
|
||||
|
||||
- name: Install dependencies
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
poetry install --no-root
|
||||
poetry run pip list
|
||||
VERSION=$(curl --silent "https://api.github.com/repos/hadolint/hadolint/releases/latest" | \
|
||||
grep '"tag_name":' | \
|
||||
sed -E 's/.*"v([^"]+)".*/\1/' \
|
||||
) && curl -L -o /tmp/hadolint "https://github.com/hadolint/hadolint/releases/download/v${VERSION}/hadolint-Linux-x86_64" \
|
||||
&& chmod +x /tmp/hadolint
|
||||
|
||||
- name: Poetry check
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
poetry check --lock
|
||||
|
||||
- name: Lint with flake8
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
poetry run flake8 . --ignore=E266,W503,E203,E501,W605,E128 --exclude contrib,ui,api
|
||||
|
||||
- name: Checking format with black
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
poetry run black --exclude api ui --check .
|
||||
|
||||
- name: Lint with pylint
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
poetry run pylint --disable=W,C,R,E -j 0 -rn -sn prowler/
|
||||
|
||||
- name: Bandit
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
poetry run bandit -q -lll -x '*_test.py,./contrib/,./api/,./ui' -r .
|
||||
|
||||
- name: Safety
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
poetry run safety check --ignore 70612 -r pyproject.toml
|
||||
|
||||
- name: Vulture
|
||||
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
|
||||
run: |
|
||||
poetry run vulture --exclude "contrib,api,ui" --min-confidence 100 .
|
||||
|
||||
- name: Dockerfile - Check if Dockerfile has changed
|
||||
id: dockerfile-changed-files
|
||||
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
|
||||
with:
|
||||
files: |
|
||||
Dockerfile
|
||||
|
||||
- name: Hadolint
|
||||
if: steps.dockerfile-changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
/tmp/hadolint Dockerfile --ignore=DL3013
|
||||
|
||||
      # Test AWS
      - name: AWS - Check if any file has changed
        id: aws-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/aws/**
            ./tests/providers/aws/**
            ./poetry.lock

      - name: AWS - Test
        if: steps.aws-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws

      # Test Azure
      - name: Azure - Check if any file has changed
        id: azure-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/azure/**
            ./tests/providers/azure/**
            ./poetry.lock

      - name: Azure - Test
        if: steps.azure-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/azure --cov-report=xml:azure_coverage.xml tests/providers/azure

      # Test GCP
      - name: GCP - Check if any file has changed
        id: gcp-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/gcp/**
            ./tests/providers/gcp/**
            ./poetry.lock

      - name: GCP - Test
        if: steps.gcp-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/gcp --cov-report=xml:gcp_coverage.xml tests/providers/gcp

      # Test Kubernetes
      - name: Kubernetes - Check if any file has changed
        id: kubernetes-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/kubernetes/**
            ./tests/providers/kubernetes/**
            ./poetry.lock

      - name: Kubernetes - Test
        if: steps.kubernetes-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/kubernetes --cov-report=xml:kubernetes_coverage.xml tests/providers/kubernetes

      # Test GitHub
      - name: GitHub - Check if any file has changed
        id: github-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/github/**
            ./tests/providers/github/**
            ./poetry.lock

      - name: GitHub - Test
        if: steps.github-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/github --cov-report=xml:github_coverage.xml tests/providers/github

      # Test NHN
      - name: NHN - Check if any file has changed
        id: nhn-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/nhn/**
            ./tests/providers/nhn/**
            ./poetry.lock

      - name: NHN - Test
        if: steps.nhn-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/nhn --cov-report=xml:nhn_coverage.xml tests/providers/nhn

      # Test M365
      - name: M365 - Check if any file has changed
        id: m365-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/m365/**
            ./tests/providers/m365/**
            ./poetry.lock

      - name: M365 - Test
        if: steps.m365-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/m365 --cov-report=xml:m365_coverage.xml tests/providers/m365

      # Test IaC
      - name: IaC - Check if any file has changed
        id: iac-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/iac/**
            ./tests/providers/iac/**
            ./poetry.lock

      - name: IaC - Test
        if: steps.iac-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/iac --cov-report=xml:iac_coverage.xml tests/providers/iac

      # Test MongoDB Atlas
      - name: MongoDB Atlas - Check if any file has changed
        id: mongodb-atlas-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/mongodbatlas/**
            ./tests/providers/mongodbatlas/**
            ./poetry.lock

      - name: MongoDB Atlas - Test
        if: steps.mongodb-atlas-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/mongodbatlas --cov-report=xml:mongodb_atlas_coverage.xml tests/providers/mongodbatlas

      # Test OCI
      - name: OCI - Check if any file has changed
        id: oci-changed-files
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/providers/oraclecloud/**
            ./tests/providers/oraclecloud/**
            ./poetry.lock

      - name: OCI - Test
        if: steps.oci-changed-files.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/providers/oraclecloud --cov-report=xml:oci_coverage.xml tests/providers/oraclecloud

      # Common Tests
      - name: Lib - Test
        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/lib --cov-report=xml:lib_coverage.xml tests/lib

      - name: Config - Test
        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
        run: |
          poetry run pytest -n auto --cov=./prowler/config --cov-report=xml:config_coverage.xml tests/config

      # Codecov
      - name: Upload coverage reports to Codecov
        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler
          files: ./aws_coverage.xml,./azure_coverage.xml,./gcp_coverage.xml,./kubernetes_coverage.xml,./github_coverage.xml,./nhn_coverage.xml,./m365_coverage.xml,./oci_coverage.xml,./lib_coverage.xml,./config_coverage.xml

@@ -1,98 +1,119 @@
name: SDK - PyPI release
name: 'SDK: PyPI Release'

on:
  release:
    types: [published]
    types:
      - 'published'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.release.tag_name }}
  cancel-in-progress: false

env:
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  PYTHON_VERSION: 3.11
  # CACHE: "poetry"
  PYTHON_VERSION: '3.12'

jobs:
  repository-check:
    name: Repository check
  validate-release:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read
    outputs:
      is_repo: ${{ steps.repository_check.outputs.is_repo }}
    steps:
      - name: Repository check
        id: repository_check
        working-directory: /tmp
        run: |
          if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
          then
            echo "is_repo=true" >> "${GITHUB_OUTPUT}"
          else
            echo "This action only runs for prowler-cloud/prowler"
            echo "is_repo=false" >> "${GITHUB_OUTPUT}"
          fi
      prowler_version: ${{ steps.parse-version.outputs.version }}
      major_version: ${{ steps.parse-version.outputs.major }}

  release-prowler-job:
    runs-on: ubuntu-latest
    needs: repository-check
    if: needs.repository-check.outputs.is_repo == 'true'
    env:
      POETRY_VIRTUALENVS_CREATE: "false"
    name: Release Prowler to PyPI
    steps:
      - name: Repository check
        working-directory: /tmp
        run: |
          if [[ "${{ github.repository }}" != "prowler-cloud/prowler" ]]; then
            echo "This action only runs for prowler-cloud/prowler"
            exit 1
          fi

      - name: Get Prowler version
      - name: Parse and validate version
        id: parse-version
        run: |
          PROWLER_VERSION="${{ env.RELEASE_TAG }}"
          echo "version=${PROWLER_VERSION}" >> "${GITHUB_OUTPUT}"

          case ${PROWLER_VERSION%%.*} in
            3)
              echo "Releasing Prowler v3 with tag ${PROWLER_VERSION}"
          # Extract major version
          MAJOR_VERSION="${PROWLER_VERSION%%.*}"
          echo "major=${MAJOR_VERSION}" >> "${GITHUB_OUTPUT}"

          # Validate major version
          case ${MAJOR_VERSION} in
            3|4|5)
              echo "✓ Releasing Prowler v${MAJOR_VERSION} with tag ${PROWLER_VERSION}"
              ;;
            4)
              echo "Releasing Prowler v4 with tag ${PROWLER_VERSION}"
              ;;
            5)
              echo "Releasing Prowler v5 with tag ${PROWLER_VERSION}"
              ;;
            *)
              echo "Releasing another Prowler major version, aborting..."
            *)
              echo "::error::Unsupported Prowler major version: ${MAJOR_VERSION}"
              exit 1
              ;;
          esac

      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
  publish-prowler:
    needs: validate-release
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: read
      id-token: write
    environment:
      name: pypi-prowler
      url: https://pypi.org/project/prowler/${{ needs.validate-release.outputs.prowler_version }}/

      - name: Install dependencies
        run: |
          pipx install poetry==2.1.1
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Setup Python
      - name: Install Poetry
        run: pipx install poetry==2.1.1

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          # cache: ${{ env.CACHE }}
          cache: 'poetry'

      - name: Build Prowler package
        run: |
          poetry build
        run: poetry build

      - name: Publish Prowler package to PyPI
        run: |
          poetry config pypi-token.pypi ${{ secrets.PYPI_API_TOKEN }}
          poetry publish
        uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0
        with:
          print-hash: true

      - name: Replicate PyPI package
  publish-prowler-cloud:
    needs: validate-release
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: read
      id-token: write
    environment:
      name: pypi-prowler-cloud
      url: https://pypi.org/project/prowler-cloud/${{ needs.validate-release.outputs.prowler_version }}/

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Install Poetry
        run: pipx install poetry==2.1.1

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'poetry'

      - name: Install toml package
        run: pip install toml

      - name: Replicate PyPI package for prowler-cloud
        run: |
          rm -rf ./dist && rm -rf ./build && rm -rf prowler.egg-info
          pip install toml
          rm -rf ./dist ./build prowler.egg-info
          python util/replicate_pypi_package.py
          poetry build

      - name: Build prowler-cloud package
        run: poetry build

      - name: Publish prowler-cloud package to PyPI
        run: |
          poetry config pypi-token.pypi ${{ secrets.PYPI_API_TOKEN }}
          poetry publish
        uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0
        with:
          print-hash: true

@@ -1,68 +1,90 @@
# This is a basic workflow to help you get started with Actions

name: SDK - Refresh AWS services' regions
name: 'SDK: Refresh AWS Regions'

on:
  schedule:
    - cron: "0 9 * * 1" # runs at 09:00 UTC every Monday
    - cron: '0 9 * * 1' # Every Monday at 09:00 UTC
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: false

env:
  GITHUB_BRANCH: "master"
  AWS_REGION_DEV: us-east-1
  PYTHON_VERSION: '3.12'
  AWS_REGION: 'us-east-1'

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "build"
  build:
    # The type of runner that the job will run on
  refresh-aws-regions:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      id-token: write
      pull-requests: write
      contents: write
    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          ref: ${{ env.GITHUB_BRANCH }}

      - name: setup python
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          ref: 'master'

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: 3.9 #install the python needed
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install boto3
        run: pip install boto3

      - name: Configure AWS Credentials -- DEV
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@a03048d87541d1d9fcf2ecf528a4a65ba9bd7838 # v5.0.0
        with:
          aws-region: ${{ env.AWS_REGION_DEV }}
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ secrets.DEV_IAM_ROLE_ARN }}
          role-session-name: refresh-AWS-regions-dev
          role-session-name: prowler-refresh-aws-regions

      # Runs a single command using the runners shell
      - name: Run a one-line script
        run: python3 util/update_aws_services_regions.py
      - name: Update AWS services regions
        run: python util/update_aws_services_regions.py

      # Create pull request
      - name: Create Pull Request
      - name: Create pull request
        id: create-pr
        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
        with:
          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          commit-message: "feat(regions_update): Update regions for AWS services"
          branch: "aws-services-regions-updated-${{ github.sha }}"
          labels: "status/waiting-for-revision, severity/low, provider/aws, no-changelog"
          title: "chore(regions_update): Changes in regions for AWS services"
          author: 'prowler-bot <179230569+prowler-bot@users.noreply.github.com>'
          committer: 'github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>'
          commit-message: 'feat(aws): update regions for AWS services'
          branch: 'aws-regions-update-${{ github.run_number }}'
          title: 'feat(aws): Update regions for AWS services'
          labels: |
            status/waiting-for-revision
            severity/low
            provider/aws
            no-changelog
          body: |
            ### Description

            This PR updates the regions for AWS services.
            Automated update of AWS service regions from the official AWS IP ranges.

            **Trigger:** ${{ github.event_name == 'schedule' && 'Scheduled (weekly)' || github.event_name == 'workflow_dispatch' && 'Manual' || 'Workflow update' }}
            **Run:** [#${{ github.run_number }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})

            ### Checklist

            - [x] This is an automated update from AWS official sources
            - [x] No manual review of region data required

            ### License

            By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.

      - name: PR creation result
        run: |
          if [[ "${{ steps.create-pr.outputs.pull-request-number }}" ]]; then
            echo "✓ Pull request #${{ steps.create-pr.outputs.pull-request-number }} created successfully"
            echo "URL: ${{ steps.create-pr.outputs.pull-request-url }}"
          else
            echo "✓ No changes detected - AWS regions are up to date"
          fi

@@ -0,0 +1,87 @@
name: 'SDK: Security'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-security.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-security.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  sdk-security-scans:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for SDK changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            prowler/CHANGELOG.md
            docs/**
            permissions/**
            api/**
            ui/**
            dashboard/**
            mcp_server/**
            README.md
            mkdocs.yml
            .backportrc.json
            .env
            docker-compose*
            examples/**
            .gitignore
            contrib/**

      - name: Install Poetry
        if: steps.check-changes.outputs.any_changed == 'true'
        run: pipx install poetry==2.1.1

      - name: Set up Python 3.12
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: '3.12'
          cache: 'poetry'

      - name: Install dependencies
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry install --no-root

      - name: Security scan with Bandit
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run bandit -q -lll -x '*_test.py,./contrib/,./api/,./ui' -r .

      - name: Security scan with Safety
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run safety check --ignore 70612 -r pyproject.toml

      - name: Dead code detection with Vulture
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry run vulture --exclude "contrib,api,ui" --min-confidence 100 .
@@ -0,0 +1,370 @@
name: 'SDK: Tests'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-tests.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'prowler/**'
      - 'tests/**'
      - 'pyproject.toml'
      - 'poetry.lock'
      - '.github/workflows/sdk-tests.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  sdk-tests:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 120
    permissions:
      contents: read
    strategy:
      matrix:
        python-version:
          - '3.9'
          - '3.10'
          - '3.11'
          - '3.12'

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for SDK changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            prowler/CHANGELOG.md
            docs/**
            permissions/**
            api/**
            ui/**
            dashboard/**
            mcp_server/**
            README.md
            mkdocs.yml
            .backportrc.json
            .env
            docker-compose*
            examples/**
            .gitignore
            contrib/**

      - name: Install Poetry
        if: steps.check-changes.outputs.any_changed == 'true'
        run: pipx install poetry==2.1.1

      - name: Set up Python ${{ matrix.python-version }}
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'poetry'

      - name: Install dependencies
        if: steps.check-changes.outputs.any_changed == 'true'
        run: poetry install --no-root

      # AWS Provider
      - name: Check if AWS files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-aws
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/aws/**
            ./tests/**/aws/**
            ./poetry.lock

      - name: Run AWS tests
        if: steps.changed-aws.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/aws --cov-report=xml:aws_coverage.xml tests/providers/aws

      - name: Upload AWS coverage to Codecov
        if: steps.changed-aws.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-aws
          files: ./aws_coverage.xml

      # Azure Provider
      - name: Check if Azure files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-azure
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/azure/**
            ./tests/**/azure/**
            ./poetry.lock

      - name: Run Azure tests
        if: steps.changed-azure.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/azure --cov-report=xml:azure_coverage.xml tests/providers/azure

      - name: Upload Azure coverage to Codecov
        if: steps.changed-azure.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-azure
          files: ./azure_coverage.xml

      # GCP Provider
      - name: Check if GCP files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-gcp
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/gcp/**
            ./tests/**/gcp/**
            ./poetry.lock

      - name: Run GCP tests
        if: steps.changed-gcp.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/gcp --cov-report=xml:gcp_coverage.xml tests/providers/gcp

      - name: Upload GCP coverage to Codecov
        if: steps.changed-gcp.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-gcp
          files: ./gcp_coverage.xml

      # Kubernetes Provider
      - name: Check if Kubernetes files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-kubernetes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/kubernetes/**
            ./tests/**/kubernetes/**
            ./poetry.lock

      - name: Run Kubernetes tests
        if: steps.changed-kubernetes.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/kubernetes --cov-report=xml:kubernetes_coverage.xml tests/providers/kubernetes

      - name: Upload Kubernetes coverage to Codecov
        if: steps.changed-kubernetes.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-kubernetes
          files: ./kubernetes_coverage.xml

      # GitHub Provider
      - name: Check if GitHub files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-github
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/github/**
            ./tests/**/github/**
            ./poetry.lock

      - name: Run GitHub tests
        if: steps.changed-github.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/github --cov-report=xml:github_coverage.xml tests/providers/github

      - name: Upload GitHub coverage to Codecov
        if: steps.changed-github.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-github
          files: ./github_coverage.xml

      # NHN Provider
      - name: Check if NHN files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-nhn
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/nhn/**
            ./tests/**/nhn/**
            ./poetry.lock

      - name: Run NHN tests
        if: steps.changed-nhn.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/nhn --cov-report=xml:nhn_coverage.xml tests/providers/nhn

      - name: Upload NHN coverage to Codecov
        if: steps.changed-nhn.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-nhn
          files: ./nhn_coverage.xml

      # M365 Provider
      - name: Check if M365 files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-m365
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/m365/**
            ./tests/**/m365/**
            ./poetry.lock

      - name: Run M365 tests
        if: steps.changed-m365.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/m365 --cov-report=xml:m365_coverage.xml tests/providers/m365

      - name: Upload M365 coverage to Codecov
        if: steps.changed-m365.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-m365
          files: ./m365_coverage.xml

      # IaC Provider
      - name: Check if IaC files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-iac
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/iac/**
            ./tests/**/iac/**
            ./poetry.lock

      - name: Run IaC tests
        if: steps.changed-iac.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/iac --cov-report=xml:iac_coverage.xml tests/providers/iac

      - name: Upload IaC coverage to Codecov
        if: steps.changed-iac.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-iac
          files: ./iac_coverage.xml

      # MongoDB Atlas Provider
      - name: Check if MongoDB Atlas files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-mongodbatlas
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/mongodbatlas/**
            ./tests/**/mongodbatlas/**
            ./poetry.lock

      - name: Run MongoDB Atlas tests
        if: steps.changed-mongodbatlas.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/mongodbatlas --cov-report=xml:mongodbatlas_coverage.xml tests/providers/mongodbatlas

      - name: Upload MongoDB Atlas coverage to Codecov
        if: steps.changed-mongodbatlas.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-mongodbatlas
          files: ./mongodbatlas_coverage.xml

      # OCI Provider
      - name: Check if OCI files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-oraclecloud
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/**/oraclecloud/**
            ./tests/**/oraclecloud/**
            ./poetry.lock

      - name: Run OCI tests
        if: steps.changed-oraclecloud.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/oraclecloud --cov-report=xml:oraclecloud_coverage.xml tests/providers/oraclecloud

      - name: Upload OCI coverage to Codecov
        if: steps.changed-oraclecloud.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-oraclecloud
          files: ./oraclecloud_coverage.xml

      # Lib
      - name: Check if Lib files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-lib
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/lib/**
            ./tests/lib/**
            ./poetry.lock

      - name: Run Lib tests
        if: steps.changed-lib.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/lib --cov-report=xml:lib_coverage.xml tests/lib

      - name: Upload Lib coverage to Codecov
        if: steps.changed-lib.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-lib
          files: ./lib_coverage.xml

      # Config
      - name: Check if Config files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-config
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: |
            ./prowler/config/**
            ./tests/config/**
            ./poetry.lock

      - name: Run Config tests
        if: steps.changed-config.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/config --cov-report=xml:config_coverage.xml tests/config

      - name: Upload Config coverage to Codecov
        if: steps.changed-config.outputs.any_changed == 'true'
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-config
          files: ./config_coverage.xml
@@ -1,121 +0,0 @@
name: UI - Build and Push containers

on:
  push:
    branches:
      - "master"
    paths:
      - "ui/**"
      - ".github/workflows/ui-build-lint-push-containers.yml"

  # Uncomment the below code to test this action on PRs
  # pull_request:
  #   branches:
  #     - "master"
  #   paths:
  #     - "ui/**"
  #     - ".github/workflows/ui-build-lint-push-containers.yml"

  release:
    types: [published]

env:
  # Tags
  LATEST_TAG: latest
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  STABLE_TAG: stable

  WORKING_DIRECTORY: ./ui

  # Container Registries
  PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
  PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-ui
  NEXT_PUBLIC_API_BASE_URL: http://prowler-api:8080/api/v1

jobs:
  repository-check:
    name: Repository check
    runs-on: ubuntu-latest
    outputs:
      is_repo: ${{ steps.repository_check.outputs.is_repo }}
    steps:
      - name: Repository check
        id: repository_check
        working-directory: /tmp
        run: |
          if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
          then
            echo "is_repo=true" >> "${GITHUB_OUTPUT}"
          else
            echo "This action only runs for prowler-cloud/prowler"
            echo "is_repo=false" >> "${GITHUB_OUTPUT}"
          fi

  # Build Prowler OSS container
  container-build-push:
    needs: repository-check
    if: needs.repository-check.outputs.is_repo == 'true'
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ${{ env.WORKING_DIRECTORY }}

    steps:
      - name: Checkout
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Set short git commit SHA
        id: vars
        run: |
          shortSha=$(git rev-parse --short ${{ github.sha }})
          echo "SHORT_SHA=${shortSha}" >> $GITHUB_ENV

      - name: Login to DockerHub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push container image (latest)
        # Comment the following line for testing
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ env.SHORT_SHA }}
            NEXT_PUBLIC_API_BASE_URL=${{ env.NEXT_PUBLIC_API_BASE_URL }}
          # Set push: false for testing
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.SHORT_SHA }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Build and push container image (release)
        if: github.event_name == 'release'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${{ env.RELEASE_TAG }}
            NEXT_PUBLIC_API_BASE_URL=${{ env.NEXT_PUBLIC_API_BASE_URL }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Trigger deployment
        if: github.event_name == 'push'
        uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.CLOUD_DISPATCH }}
          event-type: prowler-ui-deploy
          client-payload: '{"sha": "${{ github.sha }}", "short_sha": "${{ env.SHORT_SHA }}"}'
@@ -27,7 +27,8 @@ concurrency:
  cancel-in-progress: true

jobs:
  analyze:
  ui-analyze:
    if: github.repository == 'prowler-cloud/prowler'
    name: CodeQL Security Analysis
    runs-on: ubuntu-latest
    timeout-minutes: 30

@@ -0,0 +1,143 @@
name: 'UI: Container Build and Push'

on:
  push:
    branches:
      - 'master'
    paths:
      - 'ui/**'
      - '.github/workflows/ui-container-build-push.yml'
  release:
    types:
      - 'published'

permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false

env:
  # Tags
  LATEST_TAG: latest
  RELEASE_TAG: ${{ github.event.release.tag_name }}
  STABLE_TAG: stable
  WORKING_DIRECTORY: ./ui

  # Container registries
  PROWLERCLOUD_DOCKERHUB_REPOSITORY: prowlercloud
  PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-ui

  # Build args
  NEXT_PUBLIC_API_BASE_URL: http://prowler-api:8080/api/v1

jobs:
  setup:
    if: github.repository == 'prowler-cloud/prowler'
    runs-on: ubuntu-latest
    timeout-minutes: 5
    outputs:
      short-sha: ${{ steps.set-short-sha.outputs.short-sha }}
    steps:
      - name: Calculate short SHA
        id: set-short-sha
        run: echo "short-sha=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT

  container-build-push:
    needs: setup
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Login to DockerHub
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build and push UI container (latest)
        if: github.event_name == 'push'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ needs.setup.outputs.short-sha }}
            NEXT_PUBLIC_API_BASE_URL=${{ env.NEXT_PUBLIC_API_BASE_URL }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ needs.setup.outputs.short-sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push started
        if: github.event_name == 'release'
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: UI
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-started.json"

      - name: Build and push UI container (release)
        if: github.event_name == 'release'
        id: container-push
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${{ env.RELEASE_TAG }}
            NEXT_PUBLIC_API_BASE_URL=${{ env.NEXT_PUBLIC_API_BASE_URL }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Notify container push completed
        if: github.event_name == 'release' && always()
        uses: ./.github/actions/slack-notification
        env:
          SLACK_CHANNEL_ID: ${{ secrets.SLACK_PLATFORM_DEPLOYMENTS }}
          COMPONENT: UI
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          GITHUB_SERVER_URL: ${{ github.server_url }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        with:
          slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN }}
          payload-file-path: "./.github/scripts/slack-messages/container-release-completed.json"
          step-outcome: ${{ steps.container-push.outcome }}

  trigger-deployment:
    if: github.event_name == 'push'
    needs: [setup, container-build-push]
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read

    steps:
      - name: Trigger UI deployment
        uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.CLOUD_DISPATCH }}
          event-type: ui-prowler-deployment
          client-payload: '{"sha": "${{ github.sha }}", "short_sha": "${{ needs.setup.outputs.short-sha }}"}'
@@ -0,0 +1,96 @@
name: 'UI: Container Checks'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'ui/**'
      - '.github/workflows/ui-container-checks.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'ui/**'
      - '.github/workflows/ui-container-checks.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  UI_WORKING_DIR: ./ui
  IMAGE_NAME: prowler-ui

jobs:
  ui-dockerfile-lint:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check if Dockerfile changed
        id: dockerfile-changed
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files: ui/Dockerfile

      - name: Lint Dockerfile with Hadolint
        if: steps.dockerfile-changed.outputs.any_changed == 'true'
        uses: hadolint/hadolint-action@2332a7b74a6de0dda2e2221d575162eba76ba5e5 # v3.3.0
        with:
          dockerfile: ui/Dockerfile
          ignore: DL3018

  ui-container-build-and-scan:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
      security-events: write
      pull-requests: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for UI changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            ui/CHANGELOG.md
            ui/README.md

      - name: Set up Docker Buildx
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1

      - name: Build UI container
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.UI_WORKING_DIR }}
          target: prod
          push: false
          load: true
          tags: ${{ env.IMAGE_NAME }}:${{ github.sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          build-args: |
            NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=pk_test_51LwpXXXX

      - name: Scan UI container with Trivy
        if: github.repository == 'prowler-cloud/prowler' && steps.check-changes.outputs.any_changed == 'true'
        uses: ./.github/actions/trivy-scan
        with:
          image-name: ${{ env.IMAGE_NAME }}
          image-tag: ${{ github.sha }}
          fail-on-critical: 'false'
          severity: 'CRITICAL'
@@ -18,6 +18,12 @@ jobs:
      AUTH_TRUST_HOST: true
      NEXTAUTH_URL: 'http://localhost:3000'
      NEXT_PUBLIC_API_BASE_URL: 'http://localhost:8080/api/v1'
      E2E_ADMIN_USER: ${{ secrets.E2E_ADMIN_USER }}
      E2E_ADMIN_PASSWORD: ${{ secrets.E2E_ADMIN_PASSWORD }}
      E2E_AWS_PROVIDER_ACCOUNT_ID: ${{ secrets.E2E_AWS_PROVIDER_ACCOUNT_ID }}
      E2E_AWS_PROVIDER_ACCESS_KEY: ${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}
      E2E_AWS_PROVIDER_SECRET_KEY: ${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}
      E2E_AWS_PROVIDER_ROLE_ARN: ${{ secrets.E2E_AWS_PROVIDER_ROLE_ARN }}
      E2E_NEW_PASSWORD: ${{ secrets.E2E_NEW_PASSWORD }}
    steps:
      - name: Checkout repository

@@ -1,65 +0,0 @@
name: UI - Pull Request

on:
  push:
    branches:
      - "master"
      - "v5.*"
    paths:
      - ".github/workflows/ui-pull-request.yml"
      - "ui/**"
  pull_request:
    branches:
      - master
      - "v5.*"
    paths:
      - 'ui/**'
env:
  UI_WORKING_DIR: ./ui
  IMAGE_NAME: prowler-ui

jobs:
  test-and-coverage:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        os: [ubuntu-latest]
        node-version: [20.x]
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          persist-credentials: false
      - name: Setup Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
        with:
          node-version: ${{ matrix.node-version }}
          cache: 'npm'
          cache-dependency-path: './ui/package-lock.json'
      - name: Install dependencies
        working-directory: ./ui
        run: npm ci
      - name: Run Healthcheck
        working-directory: ./ui
        run: npm run healthcheck
      - name: Build the application
        working-directory: ./ui
        run: npm run build

  test-container-build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
      - name: Build Container
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
        with:
          context: ${{ env.UI_WORKING_DIR }}
          # Always build using `prod` target
          target: prod
          push: false
          tags: ${{ env.IMAGE_NAME }}:latest
          outputs: type=docker
          build-args: |
            NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=pk_test_51LwpXXXX
@@ -0,0 +1,67 @@
name: 'UI: Tests'

on:
  push:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'ui/**'
      - '.github/workflows/ui-tests.yml'
  pull_request:
    branches:
      - 'master'
      - 'v5.*'
    paths:
      - 'ui/**'
      - '.github/workflows/ui-tests.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  UI_WORKING_DIR: ./ui
  NODE_VERSION: '20.x'

jobs:
  ui-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 20
    permissions:
      contents: read
    defaults:
      run:
        working-directory: ./ui

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Check for UI changes
        id: check-changes
        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_ignore: |
            ui/CHANGELOG.md
            ui/README.md

      - name: Setup Node.js ${{ env.NODE_VERSION }}
        if: steps.check-changes.outputs.any_changed == 'true'
        uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'
          cache-dependency-path: './ui/package-lock.json'

      - name: Install dependencies
        if: steps.check-changes.outputs.any_changed == 'true'
        run: npm ci

      - name: Run healthcheck
        if: steps.check-changes.outputs.any_changed == 'true'
        run: npm run healthcheck

      - name: Build application
        if: steps.check-changes.outputs.any_changed == 'true'
        run: npm run build
+1
-1
@@ -10,4 +10,4 @@
Want some swag as appreciation for your contribution?

# Prowler Developer Guide
https://docs.prowler.com/projects/prowler-open-source/en/latest/developer-guide/introduction/
https://goto.prowler.com/devguide

@@ -88,7 +88,7 @@ prowler dashboard
| Kubernetes | 83 | 7 | 5 | 7 | Official | Stable | UI, API, CLI |
| GitHub | 17 | 2 | 1 | 0 | Official | Stable | UI, API, CLI |
| M365 | 70 | 7 | 3 | 2 | Official | Stable | UI, API, CLI |
| OCI | 51 | 13 | 1 | 10 | Official | Stable | CLI |
| OCI | 51 | 13 | 1 | 10 | Official | Stable | UI, API, CLI |
| IaC | [See `trivy` docs.](https://trivy.dev/latest/docs/coverage/iac/) | N/A | N/A | N/A | Official | Beta | CLI |
| MongoDB Atlas | 10 | 3 | 0 | 0 | Official | Beta | CLI |
| LLM | [See `promptfoo` docs.](https://www.promptfoo.dev/docs/red-team/plugins/) | N/A | N/A | N/A | Official | Beta | CLI |

@@ -5,7 +5,25 @@ All notable changes to the **Prowler API** are documented in this file.
## [1.15.0] (Prowler UNRELEASED)

### Added
- Extend `GET /api/v1/providers` with provider-type filters and optional pagination disable to support the new Overview filters [(#8975)](https://github.com/prowler-cloud/prowler/pull/8975)
- New endpoint to retrieve the number of providers grouped by provider type [(#8975)](https://github.com/prowler-cloud/prowler/pull/8975)
- Support for configuring multiple LLM providers [(#8772)](https://github.com/prowler-cloud/prowler/pull/8772)
- Support C5 compliance framework for Azure provider [(#9081)](https://github.com/prowler-cloud/prowler/pull/9081)
- Support for Oracle Cloud Infrastructure (OCI) provider [(#8927)](https://github.com/prowler-cloud/prowler/pull/8927)
- Support muting findings based on simple rules with custom reason [(#9051)](https://github.com/prowler-cloud/prowler/pull/9051)
- Support C5 compliance framework for the GCP provider [(#9097)](https://github.com/prowler-cloud/prowler/pull/9097)
- Support for Amazon Bedrock and OpenAI compatible providers in Lighthouse AI [(#8957)](https://github.com/prowler-cloud/prowler/pull/8957)

---

## [1.14.1] (Prowler 5.13.1)

### Fixed
- `/api/v1/overviews/providers` collapses data by provider type so the UI receives a single aggregated record per cloud family even when multiple accounts exist [(#9053)](https://github.com/prowler-cloud/prowler/pull/9053)
- Added retry logic to database transactions to handle Aurora read replica connection failures during scale-down events [(#9064)](https://github.com/prowler-cloud/prowler/pull/9064)
- Security Hub integrations stop failing when they read relationships via the replica by allowing replica relations and saving updates through the primary [(#9080)](https://github.com/prowler-cloud/prowler/pull/9080)

---

## [1.14.0] (Prowler 5.13.0)


Generated
+57
-2
@@ -1164,6 +1164,18 @@ files = [
    {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"},
]

[[package]]
name = "circuitbreaker"
version = "2.1.3"
description = "Python Circuit Breaker pattern implementation"
optional = false
python-versions = "*"
groups = ["main"]
files = [
    {file = "circuitbreaker-2.1.3-py3-none-any.whl", hash = "sha256:87ba6a3ed03fdc7032bc175561c2b04d52ade9d5faf94ca2b035fbdc5e6b1dd1"},
    {file = "circuitbreaker-2.1.3.tar.gz", hash = "sha256:1a4baee510f7bea3c91b194dcce7c07805fe96c4423ed5594b75af438531d084"},
]

[[package]]
name = "click"
version = "8.2.1"
@@ -4046,6 +4058,29 @@ rsa = ["cryptography (>=3.0.0)"]
signals = ["blinker (>=1.4.0)"]
signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]

[[package]]
name = "oci"
version = "2.160.3"
description = "Oracle Cloud Infrastructure Python SDK"
optional = false
python-versions = "*"
groups = ["main"]
files = [
    {file = "oci-2.160.3-py3-none-any.whl", hash = "sha256:858bff3e697098bdda44833d2476bfb4632126f0182178e7dbde4dbd156d71f0"},
    {file = "oci-2.160.3.tar.gz", hash = "sha256:57514889be3b713a8385d86e3ba8a33cf46e3563c2a7e29a93027fb30b8a2537"},
]

[package.dependencies]
certifi = "*"
circuitbreaker = {version = ">=1.3.1,<3.0.0", markers = "python_version >= \"3.7\""}
cryptography = ">=3.2.1,<46.0.0"
pyOpenSSL = ">=17.5.0,<25.0.0"
python-dateutil = ">=2.5.3,<3.0.0"
pytz = ">=2016.10"

[package.extras]
adk = ["docstring-parser (>=0.16) ; python_version >= \"3.10\" and python_version < \"4\"", "mcp (>=1.6.0) ; python_version >= \"3.10\" and python_version < \"4\"", "pydantic (>=2.10.6) ; python_version >= \"3.10\" and python_version < \"4\"", "rich (>=13.9.4) ; python_version >= \"3.10\" and python_version < \"4\""]

[[package]]
name = "openai"
version = "1.101.0"
@@ -4580,7 +4615,7 @@ files = [

[[package]]
name = "prowler"
version = "5.13.0"
version = "5.14.0"
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
optional = false
python-versions = ">3.9.1,<3.13"
@@ -4634,6 +4669,7 @@ markdown = "3.9.0"
microsoft-kiota-abstractions = "1.9.2"
msgraph-sdk = "1.23.0"
numpy = "2.0.2"
oci = "2.160.3"
pandas = "2.2.3"
py-iam-expand = "0.1.0"
py-ocsf-models = "0.5.0"
@@ -4651,7 +4687,7 @@ tzlocal = "5.3.1"
type = "git"
url = "https://github.com/prowler-cloud/prowler.git"
reference = "master"
resolved_reference = "a52697bfdfee83d14a49c11dcbe96888b5cd767e"
resolved_reference = "a41f8dcb18190e44cebed11d5cea8850bd3ba80e"

[[package]]
name = "psutil"
@@ -5136,6 +5172,25 @@ cffi = ">=1.4.1"
docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"]
tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]

[[package]]
name = "pyopenssl"
version = "24.3.0"
description = "Python wrapper module around the OpenSSL library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
    {file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"},
    {file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"},
]

[package.dependencies]
cryptography = ">=41.0.5,<45"

[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"]

[[package]]
name = "pyparsing"
version = "3.2.3"


@@ -1,3 +0,0 @@
# from django.contrib import admin

# Register your models here.

@@ -48,8 +48,9 @@ class MainRouter:
        return db == self.admin_db

    def allow_relation(self, obj1, obj2, **hints):  # noqa: F841
        # Allow relations if both objects are in either "default" or "admin" db connectors
        if {obj1._state.db, obj2._state.db} <= {self.default_db, self.admin_db}:
        # Allow relations when both objects originate from allowed connectors
        allowed_dbs = {self.default_db, self.admin_db, self.replica_db}
        if {obj1._state.db, obj2._state.db} <= allowed_dbs:
            return True
        return None

@@ -1,18 +1,35 @@
import re
import secrets
import time
import uuid
from contextlib import contextmanager
from datetime import datetime, timedelta, timezone

from celery.utils.log import get_task_logger
from config.env import env
from django.conf import settings
from django.contrib.auth.models import BaseUserManager
from django.db import DEFAULT_DB_ALIAS, connection, connections, models, transaction
from django.db import (
    DEFAULT_DB_ALIAS,
    OperationalError,
    connection,
    connections,
    models,
    transaction,
)
from django_celery_beat.models import PeriodicTask
from psycopg2 import connect as psycopg2_connect
from psycopg2.extensions import AsIs, new_type, register_adapter, register_type
from rest_framework_json_api.serializers import ValidationError

from api.db_router import get_read_db_alias, reset_read_db_alias, set_read_db_alias
from api.db_router import (
    READ_REPLICA_ALIAS,
    get_read_db_alias,
    reset_read_db_alias,
    set_read_db_alias,
)

logger = get_task_logger(__name__)

DB_USER = settings.DATABASES["default"]["USER"] if not settings.TESTING else "test"
DB_PASSWORD = (

@@ -28,6 +45,9 @@ TASK_RUNNER_DB_TABLE = "django_celery_results_taskresult"
POSTGRES_TENANT_VAR = "api.tenant_id"
POSTGRES_USER_VAR = "api.user_id"

REPLICA_MAX_ATTEMPTS = env.int("POSTGRES_REPLICA_MAX_ATTEMPTS", default=3)
REPLICA_RETRY_BASE_DELAY = env.float("POSTGRES_REPLICA_RETRY_BASE_DELAY", default=0.5)

SET_CONFIG_QUERY = "SELECT set_config(%s, %s::text, TRUE);"


@@ -71,24 +91,51 @@ def rls_transaction(
    if db_alias not in connections:
        db_alias = DEFAULT_DB_ALIAS

    router_token = None
    try:
        if db_alias != DEFAULT_DB_ALIAS:
            router_token = set_read_db_alias(db_alias)
    alias = db_alias
    is_replica = READ_REPLICA_ALIAS and alias == READ_REPLICA_ALIAS
    max_attempts = REPLICA_MAX_ATTEMPTS if is_replica else 1

        with transaction.atomic(using=db_alias):
            conn = connections[db_alias]
            with conn.cursor() as cursor:
                try:
                    # just in case the value is a UUID object
                    uuid.UUID(str(value))
                except ValueError:
                    raise ValidationError("Must be a valid UUID")
                cursor.execute(SET_CONFIG_QUERY, [parameter, value])
                yield cursor
    finally:
        if router_token is not None:
            reset_read_db_alias(router_token)
    for attempt in range(1, max_attempts + 1):
        router_token = None

        # On final attempt, fallback to primary
        if attempt == max_attempts and is_replica:
            logger.warning(
                f"RLS transaction failed after {attempt - 1} attempts on replica, "
                f"falling back to primary DB"
            )
            alias = DEFAULT_DB_ALIAS

        conn = connections[alias]
        try:
            if alias != DEFAULT_DB_ALIAS:
                router_token = set_read_db_alias(alias)

            with transaction.atomic(using=alias):
                with conn.cursor() as cursor:
                    try:
                        # just in case the value is a UUID object
                        uuid.UUID(str(value))
                    except ValueError:
                        raise ValidationError("Must be a valid UUID")
                    cursor.execute(SET_CONFIG_QUERY, [parameter, value])
                    yield cursor
                    return
        except OperationalError as e:
            # If on primary or max attempts reached, raise
            if not is_replica or attempt == max_attempts:
                raise

            # Retry with exponential backoff
            delay = REPLICA_RETRY_BASE_DELAY * (2 ** (attempt - 1))
            logger.info(
                f"RLS transaction failed on replica (attempt {attempt}/{max_attempts}), "
                f"retrying in {delay}s. Error: {e}"
            )
            time.sleep(delay)
        finally:
            if router_token is not None:
                reset_read_db_alias(router_token)
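Note: with the default settings introduced above (POSTGRES_REPLICA_MAX_ATTEMPTS=3, POSTGRES_REPLICA_RETRY_BASE_DELAY=0.5), the loop gives two replica attempts with exponential backoff and then a final attempt against the primary. A minimal sketch of that schedule (illustrative only, not part of the diff):

# Sketch: replica retry schedule under the default settings.
REPLICA_MAX_ATTEMPTS = 3
REPLICA_RETRY_BASE_DELAY = 0.5

for attempt in range(1, REPLICA_MAX_ATTEMPTS + 1):
    if attempt == REPLICA_MAX_ATTEMPTS:
        print(f"attempt {attempt}: primary (fallback, no further retry delay)")
    else:
        delay = REPLICA_RETRY_BASE_DELAY * (2 ** (attempt - 1))
        print(f"attempt {attempt}: replica, sleep {delay}s on OperationalError")
# attempt 1: replica, sleep 0.5s on OperationalError
# attempt 2: replica, sleep 1.0s on OperationalError
# attempt 3: primary (fallback, no further retry delay)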

class CustomUserManager(BaseUserManager):

@@ -30,6 +30,7 @@ from api.models import (
    LighthouseProviderConfiguration,
    LighthouseProviderModels,
    Membership,
    MuteRule,
    OverviewStatusChoices,
    PermissionChoices,
    Processor,

@@ -247,6 +248,14 @@ class ProviderFilter(FilterSet):
        choices=Provider.ProviderChoices.choices,
        lookup_expr="in",
    )
    provider_type = ChoiceFilter(
        choices=Provider.ProviderChoices.choices, field_name="provider"
    )
    provider_type__in = ChoiceInFilter(
        field_name="provider",
        choices=Provider.ProviderChoices.choices,
        lookup_expr="in",
    )

    class Meta:
        model = Provider

@@ -972,3 +981,20 @@ class LighthouseProviderModelsFilter(FilterSet):
        fields = {
            "model_id": ["exact", "icontains", "in"],
        }


class MuteRuleFilter(FilterSet):
    inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
    updated_at = DateFilter(field_name="updated_at", lookup_expr="date")
    created_by = UUIDFilter(field_name="created_by__id", lookup_expr="exact")

    class Meta:
        model = MuteRule
        fields = {
            "id": ["exact", "in"],
            "name": ["exact", "icontains"],
            "reason": ["icontains"],
            "enabled": ["exact"],
            "inserted_at": ["gte", "lte"],
            "updated_at": ["gte", "lte"],
        }

@@ -0,0 +1,34 @@
# Generated by Django 5.1.7 on 2025-10-14 00:00

from django.db import migrations

import api.db_utils


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0050_lighthouse_multi_llm"),
    ]

    operations = [
        migrations.AlterField(
            model_name="provider",
            name="provider",
            field=api.db_utils.ProviderEnumField(
                choices=[
                    ("aws", "AWS"),
                    ("azure", "Azure"),
                    ("gcp", "GCP"),
                    ("kubernetes", "Kubernetes"),
                    ("m365", "M365"),
                    ("github", "GitHub"),
                    ("oci", "Oracle Cloud Infrastructure"),
                ],
                default="aws",
            ),
        ),
        migrations.RunSQL(
            "ALTER TYPE provider ADD VALUE IF NOT EXISTS 'oci';",
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]

@@ -0,0 +1,117 @@
# Generated by Django 5.1.13 on 2025-10-22 11:56

import uuid

import django.contrib.postgres.fields
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models

import api.rls


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0051_oraclecloud_provider"),
    ]

    operations = [
        migrations.CreateModel(
            name="MuteRule",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("inserted_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "name",
                    models.CharField(
                        help_text="Human-readable name for this rule",
                        max_length=100,
                        validators=[django.core.validators.MinLengthValidator(3)],
                    ),
                ),
                (
                    "reason",
                    models.TextField(
                        help_text="Reason for muting",
                        max_length=500,
                        validators=[django.core.validators.MinLengthValidator(3)],
                    ),
                ),
                (
                    "enabled",
                    models.BooleanField(
                        default=True, help_text="Whether this rule is currently enabled"
                    ),
                ),
                (
                    "finding_uids",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=255),
                        help_text="List of finding UIDs to mute",
                        size=None,
                    ),
                ),
            ],
            options={
                "db_table": "mute_rules",
                "abstract": False,
            },
        ),
        migrations.AddField(
            model_name="finding",
            name="muted_at",
            field=models.DateTimeField(
                blank=True, help_text="Timestamp when this finding was muted", null=True
            ),
        ),
        migrations.AlterField(
            model_name="tenantapikey",
            name="name",
            field=models.CharField(
                max_length=100,
                validators=[django.core.validators.MinLengthValidator(3)],
            ),
        ),
        migrations.AddField(
            model_name="muterule",
            name="created_by",
            field=models.ForeignKey(
                help_text="User who created this rule",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="created_mute_rules",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="muterule",
            name="tenant",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
            ),
        ),
        migrations.AddConstraint(
            model_name="muterule",
            constraint=api.rls.RowLevelSecurityConstraint(
                "tenant_id",
                name="rls_on_muterule",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
        migrations.AddConstraint(
            model_name="muterule",
            constraint=models.UniqueConstraint(
                fields=("tenant_id", "name"), name="unique_mute_rule_name_per_tenant"
            ),
        ),
    ]

@@ -0,0 +1,25 @@
# Generated by Django 5.1.12 on 2025-10-14 11:46

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0052_mute_rules"),
    ]

    operations = [
        migrations.AlterField(
            model_name="lighthouseproviderconfiguration",
            name="provider_type",
            field=models.CharField(
                choices=[
                    ("openai", "OpenAI"),
                    ("bedrock", "AWS Bedrock"),
                    ("openai_compatible", "OpenAI Compatible"),
                ],
                help_text="LLM provider name",
                max_length=50,
            ),
        )
    ]

@@ -284,6 +284,7 @@ class Provider(RowLevelSecurityProtectedModel):
        KUBERNETES = "kubernetes", _("Kubernetes")
        M365 = "m365", _("M365")
        GITHUB = "github", _("GitHub")
        OCI = "oci", _("Oracle Cloud Infrastructure")

    @staticmethod
    def validate_aws_uid(value):

@@ -354,6 +355,18 @@ class Provider(RowLevelSecurityProtectedModel):
                pointer="/data/attributes/uid",
            )

    @staticmethod
    def validate_oci_uid(value):
        if not re.match(
            r"^ocid1\.([a-z0-9_-]+)\.([a-z0-9_-]+)\.([a-z0-9_-]*)\.([a-z0-9]+)$", value
        ):
            raise ModelValidationError(
                detail="Oracle Cloud Infrastructure provider ID must be a valid tenancy OCID in the format: "
                "ocid1.<resource_type>.<realm>.<region>.<unique_id>",
                code="oci-uid",
                pointer="/data/attributes/uid",
            )
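For reference, tenancy OCIDs that satisfy this pattern look like the following (dummy identifiers, not real OCIDs). The third capture group uses `*` because region-free resources such as tenancies leave the region segment empty:

# Sketch: exercising the OCID pattern from validate_oci_uid (dummy values).
import re

OCID_PATTERN = r"^ocid1\.([a-z0-9_-]+)\.([a-z0-9_-]+)\.([a-z0-9_-]*)\.([a-z0-9]+)$"

assert re.match(OCID_PATTERN, "ocid1.tenancy.oc1..aaaaaaaaexample123")         # empty region segment
assert re.match(OCID_PATTERN, "ocid1.compartment.oc1.iad.aaaaaaaaexample456")  # region-qualified
assert not re.match(OCID_PATTERN, "ocid2.tenancy.oc1..aaaaaaaaexample123")     # wrong version prefix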

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)

@@ -810,6 +823,9 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
    muted_reason = models.TextField(
        blank=True, null=True, validators=[MinLengthValidator(3)], max_length=500
    )
    muted_at = models.DateTimeField(
        null=True, blank=True, help_text="Timestamp when this finding was muted"
    )
    compliance = models.JSONField(default=dict, null=True, blank=True)

    # Denormalize resource data for performance

@@ -1922,6 +1938,59 @@ class LighthouseConfiguration(RowLevelSecurityProtectedModel):
        resource_name = "lighthouse-configurations"


class MuteRule(RowLevelSecurityProtectedModel):
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)

    # Rule metadata
    name = models.CharField(
        max_length=100,
        validators=[MinLengthValidator(3)],
        help_text="Human-readable name for this rule",
    )
    reason = models.TextField(
        validators=[MinLengthValidator(3)],
        max_length=500,
        help_text="Reason for muting",
    )
    enabled = models.BooleanField(
        default=True, help_text="Whether this rule is currently enabled"
    )

    # Audit fields
    created_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        null=True,
        related_name="created_mute_rules",
        help_text="User who created this rule",
    )

    # Rule criteria - array of finding UIDs
    finding_uids = ArrayField(
        models.CharField(max_length=255), help_text="List of finding UIDs to mute"
    )

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "mute_rules"

        constraints = [
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
            models.UniqueConstraint(
                fields=("tenant_id", "name"),
                name="unique_mute_rule_name_per_tenant",
            ),
        ]

    class JSONAPIMeta:
        resource_name = "mute-rules"
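A minimal sketch of creating a rule under RLS (illustrative only; `tenant` and `user` are assumed to exist in the calling context, and all field values are made up):

# Sketch: creating a MuteRule inside an RLS-scoped transaction.
from api.db_utils import rls_transaction
from api.models import MuteRule

with rls_transaction(str(tenant.id)):
    MuteRule.objects.create(
        tenant_id=tenant.id,
        name="Mute noisy S3 findings",
        reason="Accepted risk for the public website bucket",
        created_by=user,
        finding_uids=["prowler-aws-s3_bucket_public_access-123456789012-example"],
    )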

class Processor(RowLevelSecurityProtectedModel):
    class ProcessorChoices(models.TextChoices):
        MUTELIST = "mutelist", _("Mutelist")

@@ -1970,6 +2039,8 @@ class LighthouseProviderConfiguration(RowLevelSecurityProtectedModel):

    class LLMProviderChoices(models.TextChoices):
        OPENAI = "openai", _("OpenAI")
        BEDROCK = "bedrock", _("AWS Bedrock")
        OPENAI_COMPATIBLE = "openai_compatible", _("OpenAI Compatible")

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)

@@ -2091,7 +2162,7 @@ class LighthouseTenantConfiguration(RowLevelSecurityProtectedModel):
        ]

    class JSONAPIMeta:
        resource_name = "lighthouse-config"
        resource_name = "lighthouse-configurations"


class LighthouseProviderModels(RowLevelSecurityProtectedModel):

@@ -2140,3 +2211,6 @@ class LighthouseProviderModels(RowLevelSecurityProtectedModel):
                name="lh_prov_models_cfg_idx",
            ),
        ]

    class JSONAPIMeta:
        resource_name = "lighthouse-models"

[File diff suppressed because it is too large: +1236 -60]

@@ -0,0 +1,39 @@
"""Tests for rls_transaction retry and fallback logic."""

import pytest
from django.db import DEFAULT_DB_ALIAS
from rest_framework_json_api.serializers import ValidationError

from api.db_utils import rls_transaction


@pytest.mark.django_db
class TestRLSTransaction:
    """Simple integration tests for rls_transaction using real DB."""

    @pytest.fixture
    def tenant(self, tenants_fixture):
        return tenants_fixture[0]

    def test_success_on_primary(self, tenant):
        """Basic: transaction succeeds on primary database."""
        with rls_transaction(str(tenant.id), using=DEFAULT_DB_ALIAS) as cursor:
            cursor.execute("SELECT 1")
            result = cursor.fetchone()
            assert result == (1,)

    def test_invalid_uuid_raises_validation_error(self):
        """Invalid UUID raises ValidationError before DB operations."""
        with pytest.raises(ValidationError, match="Must be a valid UUID"):
            with rls_transaction("not-a-uuid", using=DEFAULT_DB_ALIAS):
                pass

    def test_custom_parameter_name(self, tenant):
        """Test custom RLS parameter name."""
        custom_param = "api.custom_id"
        with rls_transaction(
            str(tenant.id), parameter=custom_param, using=DEFAULT_DB_ALIAS
        ) as cursor:
            cursor.execute("SELECT current_setting(%s, true)", [custom_param])
            result = cursor.fetchone()
            assert result == (str(tenant.id),)

@@ -1,12 +1,15 @@
from datetime import datetime, timezone
from enum import Enum
from unittest.mock import patch
from unittest.mock import MagicMock, patch

import pytest
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS, OperationalError
from freezegun import freeze_time
from rest_framework_json_api.serializers import ValidationError

from api.db_utils import (
    POSTGRES_TENANT_VAR,
    _should_create_index_on_partition,
    batch_delete,
    create_objects_in_batches,

@@ -14,11 +17,22 @@ from api.db_utils import (
    generate_api_key_prefix,
    generate_random_token,
    one_week_from_now,
    rls_transaction,
    update_objects_in_batches,
)
from api.models import Provider


@pytest.fixture
def enable_read_replica():
    """
    Fixture to enable READ_REPLICA_ALIAS for tests that need replica functionality.
    This avoids polluting the global test configuration.
    """
    with patch("api.db_utils.READ_REPLICA_ALIAS", "replica"):
        yield "replica"


class TestEnumToChoices:
    def test_enum_to_choices_simple(self):
        class Color(Enum):

@@ -339,3 +353,498 @@ class TestGenerateApiKeyPrefix:
        prefix = generate_api_key_prefix()
        random_part = prefix[3:]  # Strip 'pk_'
        assert all(char in allowed_chars for char in random_part)


@pytest.mark.django_db
class TestRlsTransaction:
    def test_rls_transaction_valid_uuid_string(self, tenants_fixture):
        """Test rls_transaction with valid UUID string."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with rls_transaction(tenant_id) as cursor:
            assert cursor is not None
            cursor.execute("SELECT current_setting(%s)", [POSTGRES_TENANT_VAR])
            result = cursor.fetchone()
            assert result[0] == tenant_id

    def test_rls_transaction_valid_uuid_object(self, tenants_fixture):
        """Test rls_transaction with UUID object."""
        tenant = tenants_fixture[0]

        with rls_transaction(tenant.id) as cursor:
            assert cursor is not None
            cursor.execute("SELECT current_setting(%s)", [POSTGRES_TENANT_VAR])
            result = cursor.fetchone()
            assert result[0] == str(tenant.id)

    def test_rls_transaction_invalid_uuid_raises_validation_error(self):
        """Test rls_transaction raises ValidationError for invalid UUID."""
        invalid_uuid = "not-a-valid-uuid"

        with pytest.raises(ValidationError, match="Must be a valid UUID"):
            with rls_transaction(invalid_uuid):
                pass

    def test_rls_transaction_uses_default_database_when_no_alias(self, tenants_fixture):
        """Test rls_transaction uses DEFAULT_DB_ALIAS when no alias specified."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=None):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                with patch("api.db_utils.transaction.atomic"):
                    with rls_transaction(tenant_id):
                        pass

                mock_connections.__getitem__.assert_called_with(DEFAULT_DB_ALIAS)

    def test_rls_transaction_uses_specified_alias(self, tenants_fixture):
        """Test rls_transaction uses specified database alias via using parameter."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)
        custom_alias = "custom_db"

        with patch("api.db_utils.connections") as mock_connections:
            mock_conn = MagicMock()
            mock_cursor = MagicMock()
            mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
            mock_connections.__getitem__.return_value = mock_conn
            mock_connections.__contains__.return_value = True

            with patch("api.db_utils.transaction.atomic"):
                with patch("api.db_utils.set_read_db_alias") as mock_set_alias:
                    with patch("api.db_utils.reset_read_db_alias") as mock_reset_alias:
                        mock_set_alias.return_value = "test_token"
                        with rls_transaction(tenant_id, using=custom_alias):
                            pass

            mock_connections.__getitem__.assert_called_with(custom_alias)
            mock_set_alias.assert_called_once_with(custom_alias)
            mock_reset_alias.assert_called_once_with("test_token")

    def test_rls_transaction_uses_read_replica_from_router(
        self, tenants_fixture, enable_read_replica
    ):
        """Test rls_transaction uses read replica alias from router."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=enable_read_replica):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                with patch("api.db_utils.transaction.atomic"):
                    with patch("api.db_utils.set_read_db_alias") as mock_set_alias:
                        with patch(
                            "api.db_utils.reset_read_db_alias"
                        ) as mock_reset_alias:
                            mock_set_alias.return_value = "test_token"
                            with rls_transaction(tenant_id):
                                pass

                mock_connections.__getitem__.assert_called()
                mock_set_alias.assert_called_once()
                mock_reset_alias.assert_called_once()

    def test_rls_transaction_fallback_to_default_when_alias_not_in_connections(
        self, tenants_fixture
    ):
        """Test rls_transaction falls back to DEFAULT_DB_ALIAS when alias not in connections."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)
        invalid_alias = "nonexistent_db"

        with patch("api.db_utils.get_read_db_alias", return_value=invalid_alias):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor

                def contains_check(alias):
                    return alias == DEFAULT_DB_ALIAS

                mock_connections.__contains__.side_effect = contains_check
                mock_connections.__getitem__.return_value = mock_conn

                with patch("api.db_utils.transaction.atomic"):
                    with rls_transaction(tenant_id):
                        pass

                mock_connections.__getitem__.assert_called_with(DEFAULT_DB_ALIAS)

    def test_rls_transaction_successful_execution_on_replica_no_retries(
        self, tenants_fixture, enable_read_replica
    ):
        """Test successful execution on replica without retries."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=enable_read_replica):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                with patch("api.db_utils.transaction.atomic"):
                    with patch("api.db_utils.set_read_db_alias", return_value="token"):
                        with patch("api.db_utils.reset_read_db_alias"):
                            with rls_transaction(tenant_id):
                                pass

                assert mock_cursor.execute.call_count == 1

    def test_rls_transaction_retry_with_exponential_backoff_on_operational_error(
        self, tenants_fixture, enable_read_replica
    ):
        """Test retry with exponential backoff on OperationalError on replica."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=enable_read_replica):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                call_count = 0

                def atomic_side_effect(*args, **kwargs):
                    nonlocal call_count
                    call_count += 1
                    if call_count < 3:
                        raise OperationalError("Connection error")
                    return MagicMock(
                        __enter__=MagicMock(return_value=None),
                        __exit__=MagicMock(return_value=False),
                    )

                with patch(
                    "api.db_utils.transaction.atomic", side_effect=atomic_side_effect
                ):
                    with patch("api.db_utils.time.sleep") as mock_sleep:
                        with patch(
                            "api.db_utils.set_read_db_alias", return_value="token"
                        ):
                            with patch("api.db_utils.reset_read_db_alias"):
                                with patch("api.db_utils.logger") as mock_logger:
                                    with rls_transaction(tenant_id):
                                        pass

                assert mock_sleep.call_count == 2
                mock_sleep.assert_any_call(0.5)
                mock_sleep.assert_any_call(1.0)
                assert mock_logger.info.call_count == 2

    def test_rls_transaction_max_three_attempts_for_replica(
        self, tenants_fixture, enable_read_replica
    ):
        """Test maximum 3 attempts for replica database."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=enable_read_replica):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                with patch("api.db_utils.transaction.atomic") as mock_atomic:
                    mock_atomic.side_effect = OperationalError("Persistent error")

                    with patch("api.db_utils.time.sleep"):
                        with patch(
                            "api.db_utils.set_read_db_alias", return_value="token"
                        ):
                            with patch("api.db_utils.reset_read_db_alias"):
                                with pytest.raises(OperationalError):
                                    with rls_transaction(tenant_id):
                                        pass

                assert mock_atomic.call_count == 3

    def test_rls_transaction_only_one_attempt_for_primary(self, tenants_fixture):
        """Test only 1 attempt for primary database."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=None):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                with patch("api.db_utils.transaction.atomic") as mock_atomic:
                    mock_atomic.side_effect = OperationalError("Primary error")

                    with pytest.raises(OperationalError):
                        with rls_transaction(tenant_id):
                            pass

                assert mock_atomic.call_count == 1

    def test_rls_transaction_fallback_to_primary_after_max_attempts(
        self, tenants_fixture, enable_read_replica
    ):
        """Test fallback to primary DB after max attempts on replica."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=enable_read_replica):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                call_count = 0

                def atomic_side_effect(*args, **kwargs):
                    nonlocal call_count
                    call_count += 1
                    if call_count < 3:
                        raise OperationalError("Replica error")
                    return MagicMock(
                        __enter__=MagicMock(return_value=None),
                        __exit__=MagicMock(return_value=False),
                    )

                with patch(
                    "api.db_utils.transaction.atomic", side_effect=atomic_side_effect
                ):
                    with patch("api.db_utils.time.sleep"):
                        with patch(
                            "api.db_utils.set_read_db_alias", return_value="token"
                        ):
                            with patch("api.db_utils.reset_read_db_alias"):
                                with patch("api.db_utils.logger") as mock_logger:
                                    with rls_transaction(tenant_id):
                                        pass

                mock_logger.warning.assert_called_once()
                warning_msg = mock_logger.warning.call_args[0][0]
                assert "falling back to primary DB" in warning_msg

    def test_rls_transaction_logger_warning_on_fallback(
        self, tenants_fixture, enable_read_replica
    ):
        """Test logger warnings are emitted on fallback to primary."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=enable_read_replica):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                call_count = 0

                def atomic_side_effect(*args, **kwargs):
                    nonlocal call_count
                    call_count += 1
                    if call_count < 3:
                        raise OperationalError("Replica error")
                    return MagicMock(
                        __enter__=MagicMock(return_value=None),
                        __exit__=MagicMock(return_value=False),
                    )

                with patch(
                    "api.db_utils.transaction.atomic", side_effect=atomic_side_effect
                ):
                    with patch("api.db_utils.time.sleep"):
                        with patch(
                            "api.db_utils.set_read_db_alias", return_value="token"
                        ):
                            with patch("api.db_utils.reset_read_db_alias"):
                                with patch("api.db_utils.logger") as mock_logger:
                                    with rls_transaction(tenant_id):
                                        pass

                assert mock_logger.info.call_count == 2
                assert mock_logger.warning.call_count == 1

    def test_rls_transaction_operational_error_raised_immediately_on_primary(
        self, tenants_fixture
    ):
        """Test OperationalError raised immediately on primary without retry."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=None):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                with patch("api.db_utils.transaction.atomic") as mock_atomic:
                    mock_atomic.side_effect = OperationalError("Primary error")

                    with patch("api.db_utils.time.sleep") as mock_sleep:
                        with pytest.raises(OperationalError):
                            with rls_transaction(tenant_id):
                                pass

                mock_sleep.assert_not_called()

    def test_rls_transaction_operational_error_raised_after_max_attempts(
        self, tenants_fixture, enable_read_replica
    ):
        """Test OperationalError raised after max attempts on replica."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=enable_read_replica):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                with patch("api.db_utils.transaction.atomic") as mock_atomic:
                    mock_atomic.side_effect = OperationalError(
                        "Persistent replica error"
                    )

                    with patch("api.db_utils.time.sleep"):
                        with patch(
                            "api.db_utils.set_read_db_alias", return_value="token"
                        ):
                            with patch("api.db_utils.reset_read_db_alias"):
                                with pytest.raises(OperationalError):
                                    with rls_transaction(tenant_id):
                                        pass

    def test_rls_transaction_router_token_set_for_non_default_alias(
        self, tenants_fixture
    ):
        """Test router token is set when using non-default alias."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)
        custom_alias = "custom_db"

        with patch("api.db_utils.connections") as mock_connections:
            mock_conn = MagicMock()
            mock_cursor = MagicMock()
            mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
            mock_connections.__getitem__.return_value = mock_conn
            mock_connections.__contains__.return_value = True

            with patch("api.db_utils.transaction.atomic"):
                with patch("api.db_utils.set_read_db_alias") as mock_set_alias:
                    with patch("api.db_utils.reset_read_db_alias") as mock_reset_alias:
                        mock_set_alias.return_value = "test_token"
                        with rls_transaction(tenant_id, using=custom_alias):
                            pass

            mock_set_alias.assert_called_once_with(custom_alias)
            mock_reset_alias.assert_called_once_with("test_token")

    def test_rls_transaction_router_token_reset_in_finally_block(self, tenants_fixture):
        """Test router token is reset in finally block even on error."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)
        custom_alias = "custom_db"

        with patch("api.db_utils.connections") as mock_connections:
            mock_conn = MagicMock()
            mock_cursor = MagicMock()
            mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
            mock_connections.__getitem__.return_value = mock_conn
            mock_connections.__contains__.return_value = True

            with patch("api.db_utils.transaction.atomic") as mock_atomic:
                mock_atomic.side_effect = Exception("Unexpected error")

                with patch("api.db_utils.set_read_db_alias", return_value="test_token"):
                    with patch("api.db_utils.reset_read_db_alias") as mock_reset_alias:
                        with pytest.raises(Exception):
                            with rls_transaction(tenant_id, using=custom_alias):
                                pass

            mock_reset_alias.assert_called_once_with("test_token")

    def test_rls_transaction_router_token_not_set_for_default_alias(
        self, tenants_fixture
    ):
        """Test router token is not set when using default alias."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with patch("api.db_utils.get_read_db_alias", return_value=None):
            with patch("api.db_utils.connections") as mock_connections:
                mock_conn = MagicMock()
                mock_cursor = MagicMock()
                mock_conn.cursor.return_value.__enter__.return_value = mock_cursor
                mock_connections.__getitem__.return_value = mock_conn
                mock_connections.__contains__.return_value = True

                with patch("api.db_utils.transaction.atomic"):
                    with patch("api.db_utils.set_read_db_alias") as mock_set_alias:
                        with patch(
                            "api.db_utils.reset_read_db_alias"
                        ) as mock_reset_alias:
                            with rls_transaction(tenant_id):
                                pass

                mock_set_alias.assert_not_called()
                mock_reset_alias.assert_not_called()

    def test_rls_transaction_set_config_query_executed_with_correct_params(
        self, tenants_fixture
    ):
        """Test SET_CONFIG_QUERY executed with correct parameters."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with rls_transaction(tenant_id) as cursor:
            cursor.execute("SELECT current_setting(%s)", [POSTGRES_TENANT_VAR])
            result = cursor.fetchone()
            assert result[0] == tenant_id

    def test_rls_transaction_custom_parameter(self, tenants_fixture):
        """Test rls_transaction with custom parameter name."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)
        custom_param = "api.user_id"

        with rls_transaction(tenant_id, parameter=custom_param) as cursor:
            cursor.execute("SELECT current_setting(%s)", [custom_param])
            result = cursor.fetchone()
            assert result[0] == tenant_id

    def test_rls_transaction_cursor_yielded_correctly(self, tenants_fixture):
        """Test cursor is yielded correctly."""
        tenant = tenants_fixture[0]
        tenant_id = str(tenant.id)

        with rls_transaction(tenant_id) as cursor:
            assert cursor is not None
            cursor.execute("SELECT 1")
            result = cursor.fetchone()
            assert result[0] == 1

@@ -22,6 +22,7 @@ from prowler.providers.azure.azure_provider import AzureProvider
from prowler.providers.gcp.gcp_provider import GcpProvider
from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
from prowler.providers.m365.m365_provider import M365Provider
from prowler.providers.oraclecloud.oci_provider import OciProvider


class TestMergeDicts:

@@ -108,6 +109,7 @@ class TestReturnProwlerProvider:
            (Provider.ProviderChoices.AZURE.value, AzureProvider),
            (Provider.ProviderChoices.KUBERNETES.value, KubernetesProvider),
            (Provider.ProviderChoices.M365.value, M365Provider),
            (Provider.ProviderChoices.OCI.value, OciProvider),
        ],
    )
    def test_return_prowler_provider(self, provider_type, expected_provider):

@@ -203,6 +205,10 @@ class TestGetProwlerProviderKwargs:
                Provider.ProviderChoices.GITHUB.value,
                {"organizations": ["provider_uid"]},
            ),
            (
                Provider.ProviderChoices.OCI.value,
                {},
            ),
        ],
    )
    def test_get_prowler_provider_kwargs(self, provider_type, expected_extra_kwargs):

[File diff suppressed because it is too large]

@@ -20,6 +20,7 @@ from prowler.providers.gcp.gcp_provider import GcpProvider
from prowler.providers.github.github_provider import GithubProvider
from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
from prowler.providers.m365.m365_provider import M365Provider
from prowler.providers.oraclecloud.oci_provider import OciProvider


class CustomOAuth2Client(OAuth2Client):

@@ -67,6 +68,7 @@ def return_prowler_provider(
    | GithubProvider
    | KubernetesProvider
    | M365Provider
    | OciProvider
]:
    """Return the Prowler provider class based on the given provider type.

@@ -74,7 +76,7 @@ def return_prowler_provider(
        provider (Provider): The provider object containing the provider type and associated secrets.

    Returns:
        AwsProvider | AzureProvider | GcpProvider | GithubProvider | KubernetesProvider | M365Provider: The corresponding provider class.
        AwsProvider | AzureProvider | GcpProvider | GithubProvider | KubernetesProvider | M365Provider | OciProvider: The corresponding provider class.

    Raises:
        ValueError: If the provider type specified in `provider.provider` is not supported.

@@ -92,6 +94,8 @@ def return_prowler_provider(
            prowler_provider = M365Provider
        case Provider.ProviderChoices.GITHUB.value:
            prowler_provider = GithubProvider
        case Provider.ProviderChoices.OCI.value:
            prowler_provider = OciProvider
        case _:
            raise ValueError(f"Provider type {provider.provider} not supported")
    return prowler_provider

@@ -147,6 +151,7 @@ def initialize_prowler_provider(
    | GithubProvider
    | KubernetesProvider
    | M365Provider
    | OciProvider
):
    """Initialize a Prowler provider instance based on the given provider type.

@@ -155,8 +160,8 @@ def initialize_prowler_provider(
        mutelist_processor (Processor): The mutelist processor object containing the mutelist configuration.

    Returns:
        AwsProvider | AzureProvider | GcpProvider | GithubProvider | KubernetesProvider | M365Provider: An instance of the corresponding provider class
        (`AwsProvider`, `AzureProvider`, `GcpProvider`, `GithubProvider`, `KubernetesProvider` or `M365Provider`) initialized with the
        AwsProvider | AzureProvider | GcpProvider | GithubProvider | KubernetesProvider | M365Provider | OciProvider: An instance of the corresponding provider class
        (`AwsProvider`, `AzureProvider`, `GcpProvider`, `GithubProvider`, `KubernetesProvider`, `M365Provider` or `OciProvider`) initialized with the
        provider's secrets.
    """
    prowler_provider = return_prowler_provider(provider)

@@ -12,6 +12,24 @@ from api.models import StateChoices, Task
from api.v1.serializers import TaskSerializer


class DisablePaginationMixin:
    disable_pagination_query_param = "page[disable]"
    disable_pagination_truthy_values = {"true"}

    def should_disable_pagination(self) -> bool:
        if not hasattr(self, "request"):
            return False
        value = self.request.query_params.get(self.disable_pagination_query_param)
        if value is None:
            return False
        return str(value).lower() in self.disable_pagination_truthy_values

    def paginate_queryset(self, queryset):
        if self.should_disable_pagination():
            return None
        return super().paginate_queryset(queryset)


class PaginateByPkMixin:
    """
    Mixin to paginate on a list of PKs (cheaper than heavy JOINs),
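For context, a sketch of how the mixin behaves when mixed into a hypothetical list view (the view name and URLs below are illustrative, not from this diff):

# Sketch: wiring DisablePaginationMixin ahead of a DRF generic view.
from rest_framework.generics import ListAPIView

class FindingListView(DisablePaginationMixin, ListAPIView):
    ...

# GET /api/v1/findings                     -> paginated as usual
# GET /api/v1/findings?page[disable]=true  -> paginate_queryset() returns None,
#                                             so the full queryset is serialized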

@@ -1,5 +1,6 @@
import re

from drf_spectacular.utils import extend_schema_field
from rest_framework_json_api import serializers

@@ -11,3 +12,198 @@ class OpenAICredentialsSerializer(serializers.Serializer):
        if not re.match(pattern, value or ""):
            raise serializers.ValidationError("Invalid OpenAI API key format.")
        return value

    def to_internal_value(self, data):
        """Check for unknown fields before DRF filters them out."""
        if not isinstance(data, dict):
            raise serializers.ValidationError(
                {"non_field_errors": ["Credentials must be an object"]}
            )

        allowed_fields = set(self.fields.keys())
        provided_fields = set(data.keys())
        extra_fields = provided_fields - allowed_fields

        if extra_fields:
            raise serializers.ValidationError(
                {
                    "non_field_errors": [
                        f"Unknown fields in credentials: {', '.join(sorted(extra_fields))}"
                    ]
                }
            )

        return super().to_internal_value(data)


class BedrockCredentialsSerializer(serializers.Serializer):
    """
    Serializer for AWS Bedrock credentials validation.

    Validates long-term AWS credentials (AKIA) and region format.
    """

    access_key_id = serializers.CharField()
    secret_access_key = serializers.CharField()
    region = serializers.CharField()

    def validate_access_key_id(self, value: str) -> str:
        """Validate AWS access key ID format (AKIA for long-term credentials)."""
        pattern = r"^AKIA[0-9A-Z]{16}$"
        if not re.match(pattern, value or ""):
            raise serializers.ValidationError(
                "Invalid AWS access key ID format. Must be AKIA followed by 16 alphanumeric characters."
            )
        return value

    def validate_secret_access_key(self, value: str) -> str:
        """Validate AWS secret access key format (40 base64 characters)."""
        pattern = r"^[A-Za-z0-9/+=]{40}$"
        if not re.match(pattern, value or ""):
            raise serializers.ValidationError(
                "Invalid AWS secret access key format. Must be 40 base64 characters."
            )
        return value

    def validate_region(self, value: str) -> str:
        """Validate AWS region format."""
        pattern = r"^[a-z]{2}-[a-z]+-\d+$"
        if not re.match(pattern, value or ""):
            raise serializers.ValidationError(
                "Invalid AWS region format. Expected format like 'us-east-1' or 'eu-west-2'."
            )
        return value

    def to_internal_value(self, data):
        """Check for unknown fields before DRF filters them out."""
        if not isinstance(data, dict):
            raise serializers.ValidationError(
                {"non_field_errors": ["Credentials must be an object"]}
            )

        allowed_fields = set(self.fields.keys())
        provided_fields = set(data.keys())
        extra_fields = provided_fields - allowed_fields

        if extra_fields:
            raise serializers.ValidationError(
                {
                    "non_field_errors": [
                        f"Unknown fields in credentials: {', '.join(sorted(extra_fields))}"
                    ]
                }
            )

        return super().to_internal_value(data)


class BedrockCredentialsUpdateSerializer(BedrockCredentialsSerializer):
    """
    Serializer for AWS Bedrock credentials during UPDATE operations.

    Inherits all validation logic from BedrockCredentialsSerializer but makes
    all fields optional to support partial updates.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Make all fields optional for updates
        for field in self.fields.values():
            field.required = False


class OpenAICompatibleCredentialsSerializer(serializers.Serializer):
    """
    Minimal serializer for OpenAI-compatible credentials.

    Many OpenAI-compatible providers do not use the same key format as OpenAI.
    We only require a non-empty API key string. Additional fields can be added later
    without breaking existing configurations.
    """

    api_key = serializers.CharField()

    def validate_api_key(self, value: str) -> str:
        if not isinstance(value, str) or not value.strip():
            raise serializers.ValidationError("API key is required.")
        return value.strip()

    def to_internal_value(self, data):
        """Check for unknown fields before DRF filters them out."""
        if not isinstance(data, dict):
            raise serializers.ValidationError(
                {"non_field_errors": ["Credentials must be an object"]}
            )

        allowed_fields = set(self.fields.keys())
        provided_fields = set(data.keys())
        extra_fields = provided_fields - allowed_fields

        if extra_fields:
            raise serializers.ValidationError(
                {
                    "non_field_errors": [
                        f"Unknown fields in credentials: {', '.join(sorted(extra_fields))}"
                    ]
                }
            )

        return super().to_internal_value(data)


@extend_schema_field(
    {
        "oneOf": [
            {
                "type": "object",
                "title": "OpenAI Credentials",
                "properties": {
                    "api_key": {
                        "type": "string",
                        "description": "OpenAI API key. Must start with 'sk-' followed by alphanumeric characters, "
                        "hyphens, or underscores.",
                        "pattern": "^sk-[\\w-]+$",
                    }
                },
                "required": ["api_key"],
            },
            {
                "type": "object",
                "title": "AWS Bedrock Credentials",
                "properties": {
                    "access_key_id": {
                        "type": "string",
                        "description": "AWS access key ID.",
                        "pattern": "^AKIA[0-9A-Z]{16}$",
                    },
                    "secret_access_key": {
                        "type": "string",
                        "description": "AWS secret access key.",
                        "pattern": "^[A-Za-z0-9/+=]{40}$",
                    },
                    "region": {
                        "type": "string",
                        "description": "AWS region identifier where Bedrock is available. Examples: us-east-1, "
                        "us-west-2, eu-west-1, ap-northeast-1.",
                        "pattern": "^[a-z]{2}-[a-z]+-\\d+$",
                    },
                },
                "required": ["access_key_id", "secret_access_key", "region"],
            },
            {
                "type": "object",
                "title": "OpenAI Compatible Credentials",
                "properties": {
                    "api_key": {
                        "type": "string",
                        "description": "API key for OpenAI-compatible provider. The format varies by provider. "
                        "Note: The 'base_url' field (separate from credentials) is required when using this provider type.",
                    }
                },
                "required": ["api_key"],
            },
        ]
    }
)
class LighthouseCredentialsField(serializers.JSONField):
    pass
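A quick sketch of how the Bedrock validators behave standalone (dummy credential values, not real secrets):

# Sketch: exercising BedrockCredentialsSerializer in isolation.
serializer = BedrockCredentialsSerializer(
    data={
        "access_key_id": "AKIA" + "A" * 16,  # matches ^AKIA[0-9A-Z]{16}$
        "secret_access_key": "A" * 40,       # matches ^[A-Za-z0-9/+=]{40}$
        "region": "us-east-1",               # matches ^[a-z]{2}-[a-z]+-\d+$
    }
)
assert serializer.is_valid()

# Unknown keys are rejected by to_internal_value before DRF silently drops them.
serializer = BedrockCredentialsSerializer(
    data={
        "access_key_id": "AKIA" + "A" * 16,
        "secret_access_key": "A" * 40,
        "region": "us-east-1",
        "session_token": "anything",
    }
)
assert not serializer.is_valid()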

@@ -239,6 +239,41 @@ from rest_framework_json_api import serializers
            },
            "required": ["github_app_id", "github_app_key"],
        },
        {
            "type": "object",
            "title": "Oracle Cloud Infrastructure (OCI) API Key Credentials",
            "properties": {
                "user": {
                    "type": "string",
                    "description": "The OCID of the user to authenticate with.",
                },
                "fingerprint": {
                    "type": "string",
                    "description": "The fingerprint of the API signing key.",
                },
                "key_file": {
                    "type": "string",
                    "description": "The path to the private key file for API signing. Either key_file or key_content must be provided.",
                },
                "key_content": {
                    "type": "string",
                    "description": "The content of the private key for API signing (base64 encoded). Either key_file or key_content must be provided.",
                },
                "tenancy": {
                    "type": "string",
                    "description": "The OCID of the tenancy.",
                },
                "region": {
                    "type": "string",
                    "description": "The OCI region identifier (e.g., us-ashburn-1, us-phoenix-1).",
                },
                "pass_phrase": {
                    "type": "string",
                    "description": "The passphrase for the private key, if encrypted.",
                },
            },
            "required": ["user", "fingerprint", "tenancy", "region"],
        },
    ]
}
)
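Put together, an OCI secret payload matching this schema would look something like the following (dummy values only):

# Sketch: an OCI provider secret conforming to the schema above.
oci_secret = {
    "user": "ocid1.user.oc1..aaaaaaaaexampleuser",
    "fingerprint": "aa:bb:cc:dd:ee:ff:00:11:22:33:44:55:66:77:88:99",
    "tenancy": "ocid1.tenancy.oc1..aaaaaaaaexampletenancy",
    "region": "us-ashburn-1",
    # Exactly one of key_file / key_content should be supplied.
    "key_content": "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0t...",  # base64-encoded private key
}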
@@ -31,6 +31,7 @@ from api.models import (
|
||||
LighthouseProviderModels,
|
||||
LighthouseTenantConfiguration,
|
||||
Membership,
|
||||
MuteRule,
|
||||
Processor,
|
||||
Provider,
|
||||
ProviderGroup,
|
||||
@@ -59,7 +60,13 @@ from api.v1.serializer_utils.integrations import (
|
||||
S3ConfigSerializer,
|
||||
SecurityHubConfigSerializer,
|
||||
)
|
||||
from api.v1.serializer_utils.lighthouse import OpenAICredentialsSerializer
|
||||
from api.v1.serializer_utils.lighthouse import (
|
||||
BedrockCredentialsSerializer,
|
||||
BedrockCredentialsUpdateSerializer,
|
||||
LighthouseCredentialsField,
|
||||
OpenAICompatibleCredentialsSerializer,
|
||||
OpenAICredentialsSerializer,
|
||||
)
|
||||
from api.v1.serializer_utils.processors import ProcessorConfigField
|
||||
from api.v1.serializer_utils.providers import ProviderSecretField
|
||||
from prowler.lib.mutelist.mutelist import Mutelist
|
||||
@@ -1359,6 +1366,8 @@ class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
|
||||
serializer = KubernetesProviderSecret(data=secret)
|
||||
elif provider_type == Provider.ProviderChoices.M365.value:
|
||||
serializer = M365ProviderSecret(data=secret)
|
||||
elif provider_type == Provider.ProviderChoices.OCI.value:
|
||||
serializer = OracleCloudProviderSecret(data=secret)
|
||||
else:
|
||||
raise serializers.ValidationError(
|
||||
{"provider": f"Provider type not supported {provider_type}"}
|
||||
@@ -1472,6 +1481,19 @@ class GithubProviderSecret(serializers.Serializer):
|
||||
resource_name = "provider-secrets"
|
||||
|
||||
|
||||
class OracleCloudProviderSecret(serializers.Serializer):
|
||||
user = serializers.CharField()
|
||||
fingerprint = serializers.CharField()
|
||||
key_file = serializers.CharField(required=False)
|
||||
key_content = serializers.CharField(required=False)
|
||||
tenancy = serializers.CharField()
|
||||
region = serializers.CharField()
|
||||
pass_phrase = serializers.CharField(required=False)
|
||||
|
||||
class Meta:
|
||||
resource_name = "provider-secrets"
|
||||
|
||||
|
||||
class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
role_arn = serializers.CharField()
|
||||
external_id = serializers.CharField()
|
||||
@@ -2105,6 +2127,17 @@ class OverviewProviderSerializer(serializers.Serializer):
|
||||
}
|
||||
|
||||
|
||||
class OverviewProviderCountSerializer(serializers.Serializer):
|
||||
id = serializers.CharField(source="provider")
|
||||
count = serializers.IntegerField()
|
||||
|
||||
class JSONAPIMeta:
|
||||
resource_name = "providers-count-overview"
|
||||
|
||||
def get_root_meta(self, _resource, _many):
|
||||
return {"version": "v1"}
|
||||
|
||||
|
||||
class OverviewFindingSerializer(serializers.Serializer):
|
||||
id = serializers.CharField(default="n/a")
|
||||
new = serializers.IntegerField()
|
||||
@@ -3049,7 +3082,12 @@ class LighthouseProviderConfigCreateSerializer(RLSSerializer, BaseWriteSerialize
|
||||
Accepts credentials as JSON; stored encrypted via credentials_decoded.
|
||||
"""
|
||||
|
||||
credentials = serializers.JSONField(write_only=True, required=True)
|
||||
credentials = LighthouseCredentialsField(write_only=True, required=True)
|
||||
base_url = serializers.URLField(
|
||||
required=False,
|
||||
allow_null=True,
|
||||
help_text="Base URL for the LLM provider API. Required for 'openai_compatible' provider type.",
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = LighthouseProviderConfiguration
|
||||
@@ -3061,7 +3099,10 @@ class LighthouseProviderConfigCreateSerializer(RLSSerializer, BaseWriteSerialize
|
||||
]
|
||||
extra_kwargs = {
|
||||
"is_active": {"required": False},
|
||||
"base_url": {"required": False, "allow_null": True},
|
||||
"provider_type": {
|
||||
"help_text": "LLM provider type. Determines which credential format to use. "
|
||||
"See 'credentials' field documentation for provider-specific requirements."
|
||||
},
|
||||
}
|
||||
|
||||
def create(self, validated_data):
|
||||
@@ -3084,6 +3125,7 @@ class LighthouseProviderConfigCreateSerializer(RLSSerializer, BaseWriteSerialize
    def validate(self, attrs):
        provider_type = attrs.get("provider_type")
        credentials = attrs.get("credentials") or {}
        base_url = attrs.get("base_url")

        if provider_type == LighthouseProviderConfiguration.LLMProviderChoices.OPENAI:
            try:
@@ -3096,6 +3138,35 @@ class LighthouseProviderConfigCreateSerializer(RLSSerializer, BaseWriteSerialize
                    e.detail[f"credentials/{key}"] = value
                    del e.detail[key]
                raise e
        elif (
            provider_type == LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK
        ):
            try:
                BedrockCredentialsSerializer(data=credentials).is_valid(
                    raise_exception=True
                )
            except ValidationError as e:
                details = e.detail.copy()
                for key, value in details.items():
                    e.detail[f"credentials/{key}"] = value
                    del e.detail[key]
                raise e
        elif (
            provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.OPENAI_COMPATIBLE
        ):
            if not base_url:
                raise ValidationError({"base_url": "Base URL is required."})
            try:
                OpenAICompatibleCredentialsSerializer(data=credentials).is_valid(
                    raise_exception=True
                )
            except ValidationError as e:
                details = e.detail.copy()
                for key, value in details.items():
                    e.detail[f"credentials/{key}"] = value
                    del e.detail[key]
                raise e

        return super().validate(attrs)

@@ -3105,7 +3176,12 @@ class LighthouseProviderConfigUpdateSerializer(BaseWriteSerializer):
    Update serializer for LighthouseProviderConfiguration.
    """

    credentials = serializers.JSONField(write_only=True, required=False)
    credentials = LighthouseCredentialsField(write_only=True, required=False)
    base_url = serializers.URLField(
        required=False,
        allow_null=True,
        help_text="Base URL for the LLM provider API. Required for 'openai_compatible' provider type.",
    )

    class Meta:
        model = LighthouseProviderConfiguration
@@ -3119,7 +3195,6 @@ class LighthouseProviderConfigUpdateSerializer(BaseWriteSerializer):
        extra_kwargs = {
            "id": {"read_only": True},
            "provider_type": {"read_only": True},
            "base_url": {"required": False, "allow_null": True},
            "is_active": {"required": False},
        }

@@ -3130,7 +3205,11 @@ class LighthouseProviderConfigUpdateSerializer(BaseWriteSerializer):
            setattr(instance, attr, value)

        if credentials is not None:
            instance.credentials_decoded = credentials
            # Merge partial credentials with existing ones
            # New values overwrite existing ones, but unspecified fields are preserved
            existing_credentials = instance.credentials_decoded or {}
            merged_credentials = {**existing_credentials, **credentials}
            instance.credentials_decoded = merged_credentials

        instance.save()
        return instance
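
The merge above is plain dict unpacking, where keys in the PATCH payload win and everything else is preserved; a standalone illustration of that behavior (values are made up):

# Later keys win in {**a, **b}, so the partial update overrides only what it names
existing = {"api_key": "old-key", "org_id": "org-123"}
update = {"api_key": "new-key"}  # partial credentials from the request
merged = {**existing, **update}
assert merged == {"api_key": "new-key", "org_id": "org-123"}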
@@ -3138,6 +3217,7 @@ class LighthouseProviderConfigUpdateSerializer(BaseWriteSerializer):
    def validate(self, attrs):
        provider_type = getattr(self.instance, "provider_type", None)
        credentials = attrs.get("credentials", None)
        base_url = attrs.get("base_url", None)

        if (
            credentials is not None
@@ -3154,6 +3234,40 @@ class LighthouseProviderConfigUpdateSerializer(BaseWriteSerializer):
                    e.detail[f"credentials/{key}"] = value
                    del e.detail[key]
                raise e
        elif (
            credentials is not None
            and provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK
        ):
            try:
                BedrockCredentialsUpdateSerializer(data=credentials).is_valid(
                    raise_exception=True
                )
            except ValidationError as e:
                details = e.detail.copy()
                for key, value in details.items():
                    e.detail[f"credentials/{key}"] = value
                    del e.detail[key]
                raise e
        elif (
            credentials is not None
            and provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.OPENAI_COMPATIBLE
        ):
            if base_url is None:
                pass
            elif not base_url:
                raise ValidationError({"base_url": "Base URL cannot be empty."})
            try:
                OpenAICompatibleCredentialsSerializer(data=credentials).is_valid(
                    raise_exception=True
                )
            except ValidationError as e:
                details = e.detail.copy()
                for key, value in details.items():
                    e.detail[f"credentials/{key}"] = value
                    del e.detail[key]
                raise e

        return super().validate(attrs)

@@ -3171,7 +3285,7 @@ class LighthouseTenantConfigSerializer(RLSSerializer):

    def get_url(self, obj):
        request = self.context.get("request")
        return reverse("lighthouse-config", request=request)
        return reverse("lighthouse-configurations", request=request)

    class Meta:
        model = LighthouseTenantConfiguration
@@ -3319,3 +3433,176 @@ class LighthouseProviderModelsUpdateSerializer(BaseWriteSerializer):
        extra_kwargs = {
            "id": {"read_only": True},
        }


# Mute Rules


class MuteRuleSerializer(RLSSerializer):
    """
    Serializer for reading MuteRule instances.
    """

    finding_uids = serializers.ListField(
        child=serializers.CharField(),
        read_only=True,
        help_text="List of finding UIDs that are muted by this rule",
    )

    class Meta:
        model = MuteRule
        fields = [
            "id",
            "inserted_at",
            "updated_at",
            "name",
            "reason",
            "enabled",
            "created_by",
            "finding_uids",
        ]

    included_serializers = {
        "created_by": "api.v1.serializers.UserIncludeSerializer",
    }

class MuteRuleCreateSerializer(RLSSerializer, BaseWriteSerializer):
    """
    Serializer for creating new MuteRule instances.

    Accepts finding_ids in the request, converts them to UIDs, and stores them in finding_uids.
    """

    finding_ids = serializers.ListField(
        child=serializers.UUIDField(),
        write_only=True,
        required=True,
        help_text="List of Finding IDs to mute (will be converted to UIDs)",
    )
    finding_uids = serializers.ListField(
        child=serializers.CharField(),
        read_only=True,
        help_text="List of finding UIDs that are muted by this rule",
    )

    class Meta:
        model = MuteRule
        fields = [
            "id",
            "inserted_at",
            "updated_at",
            "name",
            "reason",
            "enabled",
            "created_by",
            "finding_ids",
            "finding_uids",
        ]
        extra_kwargs = {
            "id": {"read_only": True},
            "inserted_at": {"read_only": True},
            "updated_at": {"read_only": True},
            "enabled": {"read_only": True},
            "created_by": {"read_only": True},
        }

    def validate_name(self, value):
        """Validate that the name is unique within the tenant."""
        tenant_id = self.context.get("tenant_id")
        if MuteRule.objects.filter(tenant_id=tenant_id, name=value).exists():
            raise ValidationError("A mute rule with this name already exists.")
        return value

    def validate_finding_ids(self, value):
        """Validate that all finding IDs exist and belong to the tenant."""
        if not value:
            raise ValidationError("At least one finding_id must be provided.")

        tenant_id = self.context.get("tenant_id")

        # Check that all findings exist and belong to this tenant
        findings = Finding.all_objects.filter(tenant_id=tenant_id, id__in=value)
        found_ids = set(findings.values_list("id", flat=True))
        provided_ids = set(value)

        missing_ids = provided_ids - found_ids
        if missing_ids:
            raise ValidationError(
                f"The following finding IDs do not exist or do not belong to your tenant: {missing_ids}"
            )

        return value

    def validate(self, data):
        """Validate the entire mute rule, including overlap detection."""
        data = super().validate(data)

        tenant_id = self.context.get("tenant_id")
        finding_ids = data.get("finding_ids", [])

        if not finding_ids:
            return data

        # Convert finding IDs to UIDs (deduplicate in case multiple findings have the same UID)
        findings = Finding.all_objects.filter(id__in=finding_ids, tenant_id=tenant_id)
        finding_uids = list(set(findings.values_list("uid", flat=True)))

        # Check for overlaps with existing enabled rules
        existing_rules = MuteRule.objects.filter(tenant_id=tenant_id, enabled=True)

        for rule in existing_rules:
            overlap = set(finding_uids) & set(rule.finding_uids)
            if overlap:
                raise ConflictException(
                    detail=f"The following finding UIDs are already muted by rule '{rule.name}': {overlap}"
                )

        # Store finding_uids in validated_data for create
        data["finding_uids"] = finding_uids

        return data

    def create(self, validated_data):
        """Create a new mute rule and set created_by."""
        # Remove finding_ids from validated_data (we've already converted to finding_uids)
        validated_data.pop("finding_ids", None)

        # Set created_by to the current user
        request = self.context.get("request")
        if request and hasattr(request, "user"):
            validated_data["created_by"] = request.user

        return super().create(validated_data)

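For orientation, a hypothetical create request against this serializer; the path comes from the mute-rules router registration further below, while the JSON:API resource type and the UUID are assumptions for illustration:

POST /api/v1/mute-rules
{
    "data": {
        "type": "mute-rules",
        "attributes": {
            "name": "Accepted risk: public website bucket",
            "reason": "Bucket intentionally serves a public static site",
            "finding_ids": ["018f6d4e-0000-7000-8000-000000000000"]
        }
    }
}
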
class MuteRuleUpdateSerializer(BaseWriteSerializer):
    """
    Serializer for updating MuteRule instances.
    """

    class Meta:
        model = MuteRule
        fields = [
            "id",
            "name",
            "reason",
            "enabled",
        ]
        extra_kwargs = {
            "id": {"read_only": True},
            "name": {"required": False},
            "reason": {"required": False},
            "enabled": {"required": False},
        }

    def validate_name(self, value):
        """Validate that the name is unique within the tenant, excluding current instance."""
        tenant_id = self.context.get("tenant_id")
        if (
            MuteRule.objects.filter(tenant_id=tenant_id, name=value)
            .exclude(id=self.instance.id)
            .exists()
        ):
            raise ValidationError("A mute rule with this name already exists.")
        return value

@@ -21,6 +21,7 @@ from api.v1.views import (
    LighthouseProviderModelsViewSet,
    LighthouseTenantConfigViewSet,
    MembershipViewSet,
    MuteRuleViewSet,
    OverviewViewSet,
    ProcessorViewSet,
    ProviderGroupProvidersRelationshipView,
@@ -80,6 +81,7 @@ router.register(
    LighthouseProviderModelsViewSet,
    basename="lighthouse-models",
)
router.register(r"mute-rules", MuteRuleViewSet, basename="mute-rule")

tenants_router = routers.NestedSimpleRouter(router, r"tenants", lookup="tenant")
tenants_router.register(
@@ -150,13 +152,12 @@ urlpatterns = [
        ),
        name="provider_group-providers-relationship",
    ),
    # Lighthouse tenant config as singleton endpoint
    path(
        "lighthouse/configuration",
        LighthouseTenantConfigViewSet.as_view(
            {"get": "list", "patch": "partial_update"}
        ),
        name="lighthouse-config",
        name="lighthouse-configurations",
    ),
    # API endpoint to start SAML SSO flow
    path(
@@ -66,6 +66,7 @@ from tasks.tasks import (
    delete_provider_task,
    delete_tenant_task,
    jira_integration_task,
    mute_historical_findings_task,
    perform_scan_task,
    refresh_lighthouse_provider_models_task,
)
@@ -90,6 +91,7 @@ from api.filters import (
    LighthouseProviderConfigFilter,
    LighthouseProviderModelsFilter,
    MembershipFilter,
    MuteRuleFilter,
    ProcessorFilter,
    ProviderFilter,
    ProviderGroupFilter,
@@ -115,6 +117,7 @@ from api.models import (
    LighthouseProviderModels,
    LighthouseTenantConfiguration,
    Membership,
    MuteRule,
    Processor,
    Provider,
    ProviderGroup,
@@ -147,7 +150,7 @@ from api.utils import (
    validate_invitation,
)
from api.uuid_utils import datetime_to_uuid7, uuid7_start
from api.v1.mixins import PaginateByPkMixin, TaskManagementMixin
from api.v1.mixins import DisablePaginationMixin, PaginateByPkMixin, TaskManagementMixin
from api.v1.serializers import (
    ComplianceOverviewAttributesSerializer,
    ComplianceOverviewDetailSerializer,
@@ -175,7 +178,11 @@ from api.v1.serializers import (
    LighthouseTenantConfigSerializer,
    LighthouseTenantConfigUpdateSerializer,
    MembershipSerializer,
    MuteRuleCreateSerializer,
    MuteRuleSerializer,
    MuteRuleUpdateSerializer,
    OverviewFindingSerializer,
    OverviewProviderCountSerializer,
    OverviewProviderSerializer,
    OverviewServiceSerializer,
    OverviewSeveritySerializer,
@@ -413,6 +420,12 @@ class SchemaView(SpectacularAPIView):
                "description": "Endpoints for API keys management. These can be used as an alternative to JWT "
                "authorization.",
            },
            {
                "name": "Mute Rules",
                "description": "Endpoints for simple mute rules management. These can be used as an alternative to the"
                " Mutelist Processor if you need to mute specific findings across your tenant with a "
                "specific reason.",
            },
        ]
        return super().get(request, *args, **kwargs)

@@ -1431,7 +1444,7 @@ class ProviderGroupProvidersRelationshipView(RelationshipView, BaseRLSViewSet):
)
@method_decorator(CACHE_DECORATOR, name="list")
@method_decorator(CACHE_DECORATOR, name="retrieve")
class ProviderViewSet(BaseRLSViewSet):
class ProviderViewSet(DisablePaginationMixin, BaseRLSViewSet):
    queryset = Provider.objects.all()
    serializer_class = ProviderSerializer
    http_method_names = ["get", "post", "patch", "delete"]
@@ -3691,6 +3704,13 @@ class ComplianceOverviewViewSet(BaseRLSViewSet, TaskManagementMixin):
            "each provider are considered in the aggregation to ensure accurate and up-to-date insights."
        ),
    ),
    providers_count=extend_schema(
        summary="Get provider counts grouped by type",
        description=(
            "Retrieve the number of providers grouped by provider type. "
            "This endpoint counts every provider in the tenant, including those without completed scans."
        ),
    ),
    findings=extend_schema(
        summary="Get aggregated findings data",
        description=(
@@ -3742,6 +3762,8 @@ class OverviewViewSet(BaseRLSViewSet):
    def get_serializer_class(self):
        if self.action == "providers":
            return OverviewProviderSerializer
        elif self.action == "providers_count":
            return OverviewProviderCountSerializer
        elif self.action == "findings":
            return OverviewFindingSerializer
        elif self.action == "findings_severity":
@@ -3790,10 +3812,7 @@ class OverviewViewSet(BaseRLSViewSet):

        findings_aggregated = (
            queryset.filter(scan_id__in=latest_scan_ids)
            .values(
                "scan__provider_id",
                provider=F("scan__provider__provider"),
            )
            .values(provider=F("scan__provider__provider"))
            .annotate(
                findings_passed=Coalesce(Sum("_pass"), 0),
                findings_failed=Coalesce(Sum("fail"), 0),
@@ -3802,13 +3821,16 @@ class OverviewViewSet(BaseRLSViewSet):
            )
        )

        resources_aggregated = (
            Resource.all_objects.filter(tenant_id=tenant_id)
            .values("provider_id")
            .annotate(total_resources=Count("id"))
        )
        resources_queryset = Resource.all_objects.filter(tenant_id=tenant_id)
        if hasattr(self, "allowed_providers"):
            resources_queryset = resources_queryset.filter(
                provider__in=self.allowed_providers
            )
        resources_aggregated = resources_queryset.values(
            provider_type=F("provider__provider")
        ).annotate(total_resources=Count("id"))
        resource_map = {
            row["provider_id"]: row["total_resources"] for row in resources_aggregated
            row["provider_type"]: row["total_resources"] for row in resources_aggregated
        }

        overview = []
@@ -3816,7 +3838,7 @@ class OverviewViewSet(BaseRLSViewSet):
            overview.append(
                {
                    "provider": row["provider"],
                    "total_resources": resource_map.get(row["scan__provider_id"], 0),
                    "total_resources": resource_map.get(row["provider"], 0),
                    "total_findings": row["total_findings"],
                    "findings_passed": row["findings_passed"],
                    "findings_failed": row["findings_failed"],
@@ -3829,6 +3851,36 @@ class OverviewViewSet(BaseRLSViewSet):
            status=status.HTTP_200_OK,
        )

    @action(
        detail=False,
        methods=["get"],
        url_path="providers/count",
        url_name="providers-count",
    )
    def providers_count(self, request):
        tenant_id = self.request.tenant_id
        providers_qs = Provider.objects.filter(tenant_id=tenant_id)

        if hasattr(self, "allowed_providers"):
            allowed_ids = list(self.allowed_providers.values_list("id", flat=True))
            if not allowed_ids:
                overview = []
                return Response(
                    self.get_serializer(overview, many=True).data,
                    status=status.HTTP_200_OK,
                )
            providers_qs = providers_qs.filter(id__in=allowed_ids)

        overview = (
            providers_qs.values("provider")
            .annotate(count=Count("id"))
            .order_by("provider")
        )
        return Response(
            self.get_serializer(overview, many=True).data,
            status=status.HTTP_200_OK,
        )

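A sketch of what the new endpoint returns, assuming the JSON:API rendering implied by OverviewProviderCountSerializer above (resource type from its JSONAPIMeta, id from the provider type, counts invented):

GET .../providers/count
{
    "data": [
        {"type": "providers-count-overview", "id": "aws", "attributes": {"count": 3}},
        {"type": "providers-count-overview", "id": "gcp", "attributes": {"count": 1}}
    ],
    "meta": {"version": "v1"}
}
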
    @action(detail=False, methods=["get"], url_name="findings")
    def findings(self, request):
        tenant_id = self.request.tenant_id
@@ -4271,28 +4323,30 @@ class LighthouseConfigViewSet(BaseRLSViewSet):
@extend_schema_view(
    list=extend_schema(
        tags=["Lighthouse AI"],
        summary="List all LLM provider configs",
        summary="List all LLM provider configurations",
        description="Retrieve all LLM provider configurations for the current tenant",
    ),
    retrieve=extend_schema(
        tags=["Lighthouse AI"],
        summary="Retrieve LLM provider config",
        summary="Retrieve LLM provider configuration",
        description="Get details for a specific provider configuration in the current tenant.",
    ),
    create=extend_schema(
        tags=["Lighthouse AI"],
        summary="Create LLM provider config",
        description="Create a per-tenant configuration for an LLM provider. Only one configuration per provider type is allowed per tenant.",
        summary="Create LLM provider configuration",
        description="Create a per-tenant configuration for an LLM provider. Only one configuration per provider type "
        "is allowed per tenant.",
    ),
    partial_update=extend_schema(
        tags=["Lighthouse AI"],
        summary="Update LLM provider config",
        summary="Update LLM provider configuration",
        description="Partially update a provider configuration (e.g., base_url, is_active).",
    ),
    destroy=extend_schema(
        tags=["Lighthouse AI"],
        summary="Delete LLM provider config",
        description="Delete a provider configuration. Any tenant defaults that reference this provider are cleared during deletion.",
        summary="Delete LLM provider configuration",
        description="Delete a provider configuration. Any tenant defaults that reference this provider are cleared "
        "during deletion.",
    ),
)
class LighthouseProviderConfigViewSet(BaseRLSViewSet):
@@ -4357,16 +4411,6 @@ class LighthouseProviderConfigViewSet(BaseRLSViewSet):
    @action(detail=True, methods=["post"], url_name="connection")
    def connection(self, request, pk=None):
        instance = self.get_object()
        if (
            instance.provider_type
            != LighthouseProviderConfiguration.LLMProviderChoices.OPENAI
        ):
            return Response(
                data={
                    "errors": [{"detail": "Only 'openai' provider supported in MVP"}]
                },
                status=status.HTTP_400_BAD_REQUEST,
            )

        with transaction.atomic():
            task = check_lighthouse_provider_connection_task.delay(
@@ -4388,7 +4432,7 @@ class LighthouseProviderConfigViewSet(BaseRLSViewSet):
    @extend_schema(
        tags=["Lighthouse AI"],
        summary="Refresh LLM models catalog",
        description="Fetch available models for this provider configuration and upsert into catalog.",
        description="Fetch available models for this provider configuration and upsert into catalog. Supports OpenAI, OpenAI-compatible, and AWS Bedrock providers.",
        request=None,
        responses={202: OpenApiResponse(response=TaskSerializer)},
    )
@@ -4400,16 +4444,6 @@ class LighthouseProviderConfigViewSet(BaseRLSViewSet):
    )
    def refresh_models(self, request, pk=None):
        instance = self.get_object()
        if (
            instance.provider_type
            != LighthouseProviderConfiguration.LLMProviderChoices.OPENAI
        ):
            return Response(
                data={
                    "errors": [{"detail": "Only 'openai' provider supported in MVP"}]
                },
                status=status.HTTP_400_BAD_REQUEST,
            )

        with transaction.atomic():
            task = refresh_lighthouse_provider_models_task.delay(
@@ -4665,3 +4699,95 @@ class TenantApiKeyViewSet(BaseRLSViewSet):

        serializer = self.get_serializer(instance)
        return Response(data=serializer.data, status=status.HTTP_200_OK)


# MuteRules
@extend_schema_view(
    list=extend_schema(
        tags=["Mute Rules"],
        summary="List all mute rules",
        description="Retrieve a list of all mute rules with filtering options.",
    ),
    retrieve=extend_schema(
        tags=["Mute Rules"],
        summary="Retrieve a mute rule",
        description="Fetch detailed information about a specific mute rule by ID.",
    ),
    create=extend_schema(
        tags=["Mute Rules"],
        summary="Create a new mute rule",
        description="Create a new mute rule by providing finding IDs, name, and reason. "
        "The rule will immediately mute the selected findings and launch a background task "
        "to mute all historical findings with matching UIDs.",
        request=MuteRuleCreateSerializer,
    ),
    partial_update=extend_schema(
        tags=["Mute Rules"],
        summary="Partially update a mute rule",
        description="Update certain fields of an existing mute rule (e.g., name, reason, enabled).",
        request=MuteRuleUpdateSerializer,
        responses={200: MuteRuleSerializer},
    ),
    destroy=extend_schema(
        tags=["Mute Rules"],
        summary="Delete a mute rule",
        description="Remove a mute rule from the system. Note: Previously muted findings remain muted.",
    ),
)
class MuteRuleViewSet(BaseRLSViewSet):
    queryset = MuteRule.objects.all()
    serializer_class = MuteRuleSerializer
    filterset_class = MuteRuleFilter
    http_method_names = ["get", "post", "patch", "delete"]
    search_fields = ["name", "reason"]
    ordering = ["-inserted_at"]
    ordering_fields = [
        "name",
        "enabled",
        "inserted_at",
        "updated_at",
    ]
    required_permissions = [Permissions.MANAGE_SCANS]

    def get_queryset(self):
        queryset = MuteRule.objects.filter(tenant_id=self.request.tenant_id)
        return queryset.select_related("created_by")

    def get_serializer_class(self):
        if self.action == "create":
            return MuteRuleCreateSerializer
        elif self.action == "partial_update":
            return MuteRuleUpdateSerializer
        return super().get_serializer_class()

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        # Create the mute rule
        mute_rule = serializer.save()

        tenant_id = str(request.tenant_id)
        finding_ids = request.data.get("finding_ids", [])

        # Immediately mute the selected findings
        Finding.all_objects.filter(
            id__in=finding_ids, tenant_id=tenant_id, muted=False
        ).update(
            muted=True,
            muted_at=mute_rule.inserted_at,
            muted_reason=mute_rule.reason,
        )

        # Launch background task for historical muting
        with transaction.atomic():
            mute_historical_findings_task.apply_async(
                kwargs={"tenant_id": tenant_id, "mute_rule_id": str(mute_rule.id)}
            )

        # Return the created mute rule
        serializer = self.get_serializer(mute_rule)
        return Response(
            data=serializer.data,
            status=status.HTTP_201_CREATED,
        )

@@ -1,7 +1,5 @@
from django.contrib import admin
from django.urls import include, path

urlpatterns = [
    path("admin/", admin.site.urls),
    path("api/v1/", include("api.v1.urls")),
]

@@ -23,6 +23,7 @@ from api.models import (
    Invitation,
    LighthouseConfiguration,
    Membership,
    MuteRule,
    Processor,
    Provider,
    ProviderGroup,
@@ -499,8 +500,14 @@ def providers_fixture(tenants_fixture):
        alias="m365_testing",
        tenant_id=tenant.id,
    )
    provider7 = Provider.objects.create(
        provider="oci",
        uid="ocid1.tenancy.oc1..aaaaaaaa3dwoazoox4q7wrvriywpokp5grlhgnkwtyt6dmwyou7no6mdmzda",
        alias="oci_testing",
        tenant_id=tenant.id,
    )

    return provider1, provider2, provider3, provider4, provider5, provider6
    return provider1, provider2, provider3, provider4, provider5, provider6, provider7


@pytest.fixture
@@ -1419,6 +1426,34 @@ def api_keys_fixture(tenants_fixture, create_test_user):
    return [api_key1, api_key2, api_key3]


@pytest.fixture
def mute_rules_fixture(tenants_fixture, create_test_user, findings_fixture):
    """Create test mute rules for testing."""
    tenant = tenants_fixture[0]
    user = create_test_user

    # Create two mute rules: one enabled, one disabled
    mute_rule1 = MuteRule.objects.create(
        tenant_id=tenant.id,
        name="Test Rule 1",
        reason="Security exception for testing",
        enabled=True,
        created_by=user,
        finding_uids=[findings_fixture[0].uid],
    )

    mute_rule2 = MuteRule.objects.create(
        tenant_id=tenant.id,
        name="Test Rule 2",
        reason="Compliance exception approved",
        enabled=False,
        created_by=user,
        finding_uids=[findings_fixture[1].uid],
    )

    return mute_rule1, mute_rule2


def get_authorization_header(access_token: str) -> dict:
    return {"Authorization": f"Bearer {access_token}"}

@@ -21,6 +21,8 @@ from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected im
    AWSWellArchitected,
)
from prowler.lib.outputs.compliance.c5.c5_aws import AWSC5
from prowler.lib.outputs.compliance.c5.c5_azure import AzureC5
from prowler.lib.outputs.compliance.c5.c5_gcp import GCPC5
from prowler.lib.outputs.compliance.ccc.ccc_aws import CCC_AWS
from prowler.lib.outputs.compliance.ccc.ccc_azure import CCC_Azure
from prowler.lib.outputs.compliance.ccc.ccc_gcp import CCC_GCP
@@ -30,6 +32,7 @@ from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
from prowler.lib.outputs.compliance.cis.cis_github import GithubCIS
from prowler.lib.outputs.compliance.cis.cis_kubernetes import KubernetesCIS
from prowler.lib.outputs.compliance.cis.cis_m365 import M365CIS
from prowler.lib.outputs.compliance.cis.cis_oci import OCICIS
from prowler.lib.outputs.compliance.ens.ens_aws import AWSENS
from prowler.lib.outputs.compliance.ens.ens_azure import AzureENS
from prowler.lib.outputs.compliance.ens.ens_gcp import GCPENS
@@ -87,6 +90,7 @@ COMPLIANCE_CLASS_MAP = {
        (lambda name: name.startswith("iso27001_"), AzureISO27001),
        (lambda name: name == "ccc_azure", CCC_Azure),
        (lambda name: name == "prowler_threatscore_azure", ProwlerThreatScoreAzure),
        (lambda name: name == "c5_azure", AzureC5),
    ],
    "gcp": [
        (lambda name: name.startswith("cis_"), GCPCIS),
@@ -95,6 +99,7 @@ COMPLIANCE_CLASS_MAP = {
        (lambda name: name.startswith("iso27001_"), GCPISO27001),
        (lambda name: name == "prowler_threatscore_gcp", ProwlerThreatScoreGCP),
        (lambda name: name == "ccc_gcp", CCC_GCP),
        (lambda name: name == "c5_gcp", GCPC5),
    ],
    "kubernetes": [
        (lambda name: name.startswith("cis_"), KubernetesCIS),
@@ -108,6 +113,9 @@ COMPLIANCE_CLASS_MAP = {
    "github": [
        (lambda name: name.startswith("cis_"), GithubCIS),
    ],
    "oci": [
        (lambda name: name.startswith("cis_"), OCICIS),
    ],
}

@@ -5,7 +5,7 @@ from celery.utils.log import get_task_logger
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE
from tasks.utils import batched

from api.db_router import READ_REPLICA_ALIAS
from api.db_router import READ_REPLICA_ALIAS, MainRouter
from api.db_utils import rls_transaction
from api.models import Finding, Integration, Provider
from api.utils import initialize_prowler_integration, initialize_prowler_provider
@@ -179,7 +179,7 @@ def get_security_hub_client_from_integration(
        if the connection was successful and the SecurityHub client or connection object.
    """
    # Get the provider associated with this integration
    with rls_transaction(tenant_id):
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        provider_relationship = integration.integrationproviderrelationship_set.first()
        if not provider_relationship:
            return Connection(
@@ -208,7 +208,7 @@ def get_security_hub_client_from_integration(
            regions_status[region] = region in connection.enabled_regions

        # Save regions information in the integration configuration
        with rls_transaction(tenant_id):
        with rls_transaction(tenant_id, using=MainRouter.default_db):
            integration.configuration["regions"] = regions_status
            integration.save()

@@ -223,7 +223,7 @@ def get_security_hub_client_from_integration(
        return True, security_hub
    else:
        # Reset regions information if connection fails
        with rls_transaction(tenant_id):
        with rls_transaction(tenant_id, using=MainRouter.default_db):
            integration.configuration["regions"] = {}
            integration.save()

@@ -334,8 +334,11 @@ def upload_security_hub_integration(
                    f"Security Hub connection failed for integration {integration.id}: "
                    f"{security_hub.error}"
                )
                integration.connected = False
                integration.save()
                with rls_transaction(
                    tenant_id, using=MainRouter.default_db
                ):
                    integration.connected = False
                    integration.save()
                break  # Skip this integration

            security_hub_client = security_hub

@@ -1,6 +1,8 @@
from typing import Dict, Set
from typing import Dict

import boto3
import openai
from botocore.exceptions import BotoCoreError, ClientError
from celery.utils.log import get_task_logger

from api.models import LighthouseProviderConfiguration, LighthouseProviderModels
@@ -30,16 +32,71 @@ def _extract_openai_api_key(
    return api_key


def _extract_openai_compatible_params(
    provider_cfg: LighthouseProviderConfiguration,
) -> Dict[str, str] | None:
    """
    Extract base_url and api_key for OpenAI-compatible providers.
    """
    creds = provider_cfg.credentials_decoded
    base_url = provider_cfg.base_url
    if not isinstance(creds, dict):
        return None
    api_key = creds.get("api_key")
    if not isinstance(api_key, str) or not api_key:
        return None
    if not isinstance(base_url, str) or not base_url:
        return None
    return {"base_url": base_url, "api_key": api_key}


def _extract_bedrock_credentials(
    provider_cfg: LighthouseProviderConfiguration,
) -> Dict[str, str] | None:
    """
    Safely extract AWS Bedrock credentials from a provider configuration.

    Args:
        provider_cfg (LighthouseProviderConfiguration): The provider configuration instance
            containing the credentials.

    Returns:
        Dict[str, str] | None: Dictionary with 'access_key_id', 'secret_access_key', and
            'region' if present and valid, otherwise None.
    """
    creds = provider_cfg.credentials_decoded
    if not isinstance(creds, dict):
        return None

    access_key_id = creds.get("access_key_id")
    secret_access_key = creds.get("secret_access_key")
    region = creds.get("region")

    # Validate all required fields are present and are strings
    if (
        not isinstance(access_key_id, str)
        or not access_key_id
        or not isinstance(secret_access_key, str)
        or not secret_access_key
        or not isinstance(region, str)
        or not region
    ):
        return None

    return {
        "access_key_id": access_key_id,
        "secret_access_key": secret_access_key,
        "region": region,
    }

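For illustration, a credentials_decoded payload that would satisfy _extract_bedrock_credentials above; the values are placeholders, not real AWS credentials:

# All three keys must be non-empty strings or the extractor returns None
bedrock_credentials_example = {
    "access_key_id": "AKIAEXAMPLEEXAMPLE00",
    "secret_access_key": "examplesecretkeyexamplesecretkeyexample0",
    "region": "us-east-1",
}
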
def check_lighthouse_provider_connection(provider_config_id: str) -> Dict:
    """
    Validate a Lighthouse provider configuration by calling the provider API and
    toggle its active state accordingly.

    Currently supports the OpenAI provider by invoking `models.list` to verify that
    the provided credentials are valid.

    Args:
        provider_config_id (str): The primary key of the `LighthouseProviderConfiguration`
        provider_config_id: The primary key of the `LighthouseProviderConfiguration`
            to validate.

    Returns:
@@ -55,42 +112,238 @@ def check_lighthouse_provider_connection(provider_config_id: str) -> Dict:
    """
    provider_cfg = LighthouseProviderConfiguration.objects.get(pk=provider_config_id)

    # TODO: Add support for other providers
    if (
        provider_cfg.provider_type
        != LighthouseProviderConfiguration.LLMProviderChoices.OPENAI
    ):
        return {"connected": False, "error": "Unsupported provider type"}

    api_key = _extract_openai_api_key(provider_cfg)
    if not api_key:
        provider_cfg.is_active = False
        provider_cfg.save()
        return {"connected": False, "error": "API key is invalid or missing"}

    try:
        client = openai.OpenAI(api_key=api_key)
        _ = client.models.list()
        if (
            provider_cfg.provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.OPENAI
        ):
            api_key = _extract_openai_api_key(provider_cfg)
            if not api_key:
                provider_cfg.is_active = False
                provider_cfg.save()
                return {"connected": False, "error": "API key is invalid or missing"}

            # Test connection by listing models
            client = openai.OpenAI(api_key=api_key)
            _ = client.models.list()

        elif (
            provider_cfg.provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK
        ):
            bedrock_creds = _extract_bedrock_credentials(provider_cfg)
            if not bedrock_creds:
                provider_cfg.is_active = False
                provider_cfg.save()
                return {
                    "connected": False,
                    "error": "AWS credentials are invalid or missing",
                }

            # Test connection by listing foundation models
            bedrock_client = boto3.client(
                "bedrock",
                aws_access_key_id=bedrock_creds["access_key_id"],
                aws_secret_access_key=bedrock_creds["secret_access_key"],
                region_name=bedrock_creds["region"],
            )
            _ = bedrock_client.list_foundation_models()

        elif (
            provider_cfg.provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.OPENAI_COMPATIBLE
        ):
            params = _extract_openai_compatible_params(provider_cfg)
            if not params:
                provider_cfg.is_active = False
                provider_cfg.save()
                return {
                    "connected": False,
                    "error": "Base URL or API key is invalid or missing",
                }

            # Test connection using OpenAI SDK with custom base_url
            # Note: base_url should include version (e.g., https://openrouter.ai/api/v1)
            client = openai.OpenAI(
                api_key=params["api_key"],
                base_url=params["base_url"],
            )
            _ = client.models.list()

        else:
            return {"connected": False, "error": "Unsupported provider type"}

        # Connection successful
        provider_cfg.is_active = True
        provider_cfg.save()
        return {"connected": True, "error": None}

    except Exception as e:
        logger.warning("OpenAI connection check failed: %s", str(e))
        logger.warning(
            "%s connection check failed: %s", provider_cfg.provider_type, str(e)
        )
        provider_cfg.is_active = False
        provider_cfg.save()
        return {"connected": False, "error": str(e)}

def _fetch_openai_models(api_key: str) -> Dict[str, str]:
    """
    Fetch available models from OpenAI API.

    Args:
        api_key: OpenAI API key for authentication.

    Returns:
        Dict mapping model_id to model_name. For OpenAI, both are the same
        as the API doesn't provide separate display names.

    Raises:
        Exception: If the API call fails.
    """
    client = openai.OpenAI(api_key=api_key)
    models = client.models.list()
    # OpenAI uses model.id for both ID and display name
    return {m.id: m.id for m in getattr(models, "data", [])}


def _fetch_openai_compatible_models(base_url: str, api_key: str) -> Dict[str, str]:
    """
    Fetch available models from an OpenAI-compatible API using the OpenAI SDK.

    Returns a mapping of model_id -> model_name. Prefers the 'name' attribute
    if available (e.g., from OpenRouter), otherwise falls back to 'id'.

    Note: base_url should include version (e.g., https://openrouter.ai/api/v1)
    """
    client = openai.OpenAI(api_key=api_key, base_url=base_url)
    models = client.models.list()

    available_models: Dict[str, str] = {}
    for model in models.data:
        model_id = model.id
        # Prefer provider-supplied human-friendly name when available
        name = getattr(model, "name", None)
        if name:
            available_models[model_id] = name
        else:
            available_models[model_id] = model_id

    return available_models

def _fetch_bedrock_models(bedrock_creds: Dict[str, str]) -> Dict[str, str]:
    """
    Fetch available models from AWS Bedrock with entitlement verification.

    This function:
    1. Lists foundation models with TEXT modality support
    2. Lists inference profiles with TEXT modality support
    3. Verifies user has entitlement access to each model

    Args:
        bedrock_creds: Dictionary with 'access_key_id', 'secret_access_key', and 'region'.

    Returns:
        Dict mapping model_id to model_name for all accessible models.

    Raises:
        BotoCoreError, ClientError: If AWS API calls fail.
    """
    bedrock_client = boto3.client(
        "bedrock",
        aws_access_key_id=bedrock_creds["access_key_id"],
        aws_secret_access_key=bedrock_creds["secret_access_key"],
        region_name=bedrock_creds["region"],
    )

    models_to_check: Dict[str, str] = {}

    # Step 1: Get foundation models with TEXT modality
    foundation_response = bedrock_client.list_foundation_models()
    model_summaries = foundation_response.get("modelSummaries", [])

    for model in model_summaries:
        # Check if model supports TEXT input and output modality
        input_modalities = model.get("inputModalities", [])
        output_modalities = model.get("outputModalities", [])

        if "TEXT" not in input_modalities or "TEXT" not in output_modalities:
            continue

        model_id = model.get("modelId")
        if not model_id:
            continue

        inference_types = model.get("inferenceTypesSupported", [])

        # Only include models with ON_DEMAND inference support
        if "ON_DEMAND" in inference_types:
            models_to_check[model_id] = model["modelName"]

    # Step 2: Get inference profiles
    try:
        inference_profiles_response = bedrock_client.list_inference_profiles()
        inference_profiles = inference_profiles_response.get(
            "inferenceProfileSummaries", []
        )

        for profile in inference_profiles:
            # Check if profile supports TEXT modality
            input_modalities = profile.get("inputModalities", [])
            output_modalities = profile.get("outputModalities", [])

            if "TEXT" not in input_modalities or "TEXT" not in output_modalities:
                continue

            profile_id = profile.get("inferenceProfileId")
            if profile_id:
                models_to_check[profile_id] = profile["inferenceProfileName"]

    except (BotoCoreError, ClientError) as e:
        logger.info(
            "Could not fetch inference profiles in %s: %s",
            bedrock_creds["region"],
            str(e),
        )

    # Step 3: Verify entitlement availability for each model
    available_models: Dict[str, str] = {}

    for model_id, model_name in models_to_check.items():
        try:
            availability = bedrock_client.get_foundation_model_availability(
                modelId=model_id
            )

            entitlement = availability.get("entitlementAvailability")

            # Only include models user has access to
            if entitlement == "AVAILABLE":
                available_models[model_id] = model_name
            else:
                logger.debug(
                    "Skipping model %s - entitlement status: %s", model_id, entitlement
                )

        except (BotoCoreError, ClientError) as e:
            logger.debug(
                "Could not check availability for model %s: %s", model_id, str(e)
            )
            continue

    return available_models

def refresh_lighthouse_provider_models(provider_config_id: str) -> Dict:
    """
    Refresh the catalog of models for a Lighthouse provider configuration.

    For the OpenAI provider, this fetches the current list of models, upserts entries
    into `LighthouseProviderModels`, and deletes stale entries no longer returned by
    the provider.
    Fetches the current list of models from the provider, upserts entries into
    `LighthouseProviderModels`, and deletes stale entries no longer returned.

    Args:
        provider_config_id (str): The primary key of the `LighthouseProviderConfiguration`
        provider_config_id: The primary key of the `LighthouseProviderConfiguration`
            whose models should be refreshed.

    Returns:
@@ -104,45 +357,81 @@ def refresh_lighthouse_provider_models(provider_config_id: str) -> Dict:
        LighthouseProviderConfiguration.DoesNotExist: If no configuration exists with the given ID.
    """
    provider_cfg = LighthouseProviderConfiguration.objects.get(pk=provider_config_id)
    fetched_models: Dict[str, str] = {}

    if (
        provider_cfg.provider_type
        != LighthouseProviderConfiguration.LLMProviderChoices.OPENAI
    ):
        return {
            "created": 0,
            "updated": 0,
            "deleted": 0,
            "error": "Unsupported provider type",
        }

    api_key = _extract_openai_api_key(provider_cfg)
    if not api_key:
        return {
            "created": 0,
            "updated": 0,
            "deleted": 0,
            "error": "API key is invalid or missing",
        }

    # Fetch models from the appropriate provider
    try:
        client = openai.OpenAI(api_key=api_key)
        models = client.models.list()
        fetched_ids: Set[str] = {m.id for m in getattr(models, "data", [])}
    except Exception as e:  # noqa: BLE001
        logger.warning("OpenAI models refresh failed: %s", str(e))
        if (
            provider_cfg.provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.OPENAI
        ):
            api_key = _extract_openai_api_key(provider_cfg)
            if not api_key:
                return {
                    "created": 0,
                    "updated": 0,
                    "deleted": 0,
                    "error": "API key is invalid or missing",
                }
            fetched_models = _fetch_openai_models(api_key)

        elif (
            provider_cfg.provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK
        ):
            bedrock_creds = _extract_bedrock_credentials(provider_cfg)
            if not bedrock_creds:
                return {
                    "created": 0,
                    "updated": 0,
                    "deleted": 0,
                    "error": "AWS credentials are invalid or missing",
                }
            fetched_models = _fetch_bedrock_models(bedrock_creds)

        elif (
            provider_cfg.provider_type
            == LighthouseProviderConfiguration.LLMProviderChoices.OPENAI_COMPATIBLE
        ):
            params = _extract_openai_compatible_params(provider_cfg)
            if not params:
                return {
                    "created": 0,
                    "updated": 0,
                    "deleted": 0,
                    "error": "Base URL or API key is invalid or missing",
                }
            fetched_models = _fetch_openai_compatible_models(
                params["base_url"], params["api_key"]
            )

        else:
            return {
                "created": 0,
                "updated": 0,
                "deleted": 0,
                "error": "Unsupported provider type",
            }

    except Exception as e:
        logger.warning(
            "Unexpected error refreshing %s models: %s",
            provider_cfg.provider_type,
            str(e),
        )
        return {"created": 0, "updated": 0, "deleted": 0, "error": str(e)}

    # Upsert models into the catalog
    created = 0
    updated = 0

    for model_id in fetched_ids:
    for model_id, model_name in fetched_models.items():
        obj, was_created = LighthouseProviderModels.objects.update_or_create(
            tenant_id=provider_cfg.tenant_id,
            provider_configuration=provider_cfg,
            model_id=model_id,
            defaults={
                "model_name": model_id,  # OpenAI doesn't return a separate display name
                "model_name": model_name,
                "default_parameters": {},
            },
        )
@@ -156,7 +445,7 @@ def refresh_lighthouse_provider_models(provider_config_id: str) -> Dict:
        LighthouseProviderModels.objects.filter(
            tenant_id=provider_cfg.tenant_id, provider_configuration=provider_cfg
        )
        .exclude(model_id__in=fetched_ids)
        .exclude(model_id__in=fetched_models.keys())
        .delete()
    )

@@ -0,0 +1,64 @@
from celery.utils.log import get_task_logger
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE
from tasks.utils import batched

from api.db_utils import rls_transaction
from api.models import Finding, MuteRule

logger = get_task_logger(__name__)


def mute_historical_findings(tenant_id: str, mute_rule_id: str):
    """
    Mute historical findings that match the given mute rule.

    This function processes findings in batches, updating their muted status
    and adding the mute reason.

    Args:
        tenant_id (str): The tenant ID for RLS context
        mute_rule_id (str): The ID of the mute rule to apply

    Returns:
        dict: Summary of the muting operation with findings_muted count
    """
    findings_muted_count = 0

    # Get the list of UIDs to mute and the reason
    with rls_transaction(tenant_id):
        mute_rule = MuteRule.objects.get(id=mute_rule_id, tenant_id=tenant_id)
        finding_uids = mute_rule.finding_uids
        mute_reason = mute_rule.reason
        muted_at = mute_rule.inserted_at

    # Query findings that match the UIDs and are not already muted
    with rls_transaction(tenant_id):
        findings_to_mute = Finding.objects.filter(
            tenant_id=tenant_id, uid__in=finding_uids, muted=False
        )
        total_findings = findings_to_mute.count()

        logger.info(
            f"Processing {total_findings} findings for mute rule {mute_rule_id}"
        )

        if total_findings > 0:
            for batch, is_last in batched(
                findings_to_mute.iterator(), DJANGO_FINDINGS_BATCH_SIZE
            ):
                batch_ids = [f.id for f in batch]
                updated_count = Finding.all_objects.filter(
                    id__in=batch_ids, tenant_id=tenant_id
                ).update(
                    muted=True,
                    muted_at=muted_at,
                    muted_reason=mute_reason,
                )
                findings_muted_count += updated_count

    logger.info(f"Muted {findings_muted_count} findings for rule {mute_rule_id}")

    return {
        "findings_muted": findings_muted_count,
        "rule_id": mute_rule_id,
    }
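
The loop above assumes the batched helper from tasks.utils yields (batch, is_last) pairs; a minimal sketch of such a helper, written here only to document the assumed contract and not taken from the project:

from itertools import islice


def batched_sketch(iterator, batch_size):
    """Yield (list_of_items, is_last) pairs from any iterator."""
    iterator = iter(iterator)
    batch = list(islice(iterator, batch_size))
    while batch:
        next_batch = list(islice(iterator, batch_size))
        yield batch, not next_batch  # is_last is True when nothing follows
        batch = next_batch
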
@@ -30,6 +30,7 @@ from api.exceptions import ProviderConnectionError
from api.models import (
    ComplianceRequirementOverview,
    Finding,
    MuteRule,
    Processor,
    Provider,
    Resource,
@@ -301,6 +302,20 @@ def perform_prowler_scan(
            logger.error(f"Error processing mutelist rules: {e}")
            mutelist_processor = None

    # Load enabled mute rules for this tenant
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        try:
            active_mute_rules = MuteRule.objects.filter(
                tenant_id=tenant_id, enabled=True
            ).values_list("finding_uids", "reason")

            mute_rules_cache = {}
            for finding_uids, reason in active_mute_rules:
                for uid in finding_uids:
                    mute_rules_cache[uid] = reason
        except Exception as e:
            logger.error(f"Error loading mute rules: {e}")
            mute_rules_cache = {}
    try:
        with rls_transaction(tenant_id):
            try:
@@ -449,11 +464,22 @@ def perform_prowler_scan(
            if not last_first_seen_at:
                last_first_seen_at = datetime.now(tz=timezone.utc)

            # If the finding is muted at this time the reason must be the configured Mutelist
            muted_reason = "Muted by mutelist" if finding.muted else None
            # Determine if finding should be muted and why
            # Priority: mutelist processor (highest) > manual mute rules
            is_muted = False
            muted_reason = None

            # Check mutelist processor first (highest priority)
            if finding.muted:
                is_muted = True
                muted_reason = "Muted by mutelist"
            # If not muted by mutelist, check manual mute rules
            elif finding_uid in mute_rules_cache:
                is_muted = True
                muted_reason = mute_rules_cache[finding_uid]

            # Increment failed_findings_count cache if the finding status is FAIL and not muted
            if status == FindingStatus.FAIL and not finding.muted:
            if status == FindingStatus.FAIL and not is_muted:
                resource_uid = finding.resource_uid
                resource_failed_findings_cache[resource_uid] += 1
@@ -472,7 +498,8 @@ def perform_prowler_scan(
                check_id=finding.check_id,
                scan=scan_instance,
                first_seen_at=last_first_seen_at,
                muted=finding.muted,
                muted=is_muted,
                muted_at=datetime.now(tz=timezone.utc) if is_muted else None,
                muted_reason=muted_reason,
                compliance=finding.compliance,
            )

@@ -31,6 +31,7 @@ from tasks.jobs.lighthouse_providers import (
    check_lighthouse_provider_connection,
    refresh_lighthouse_provider_models,
)
from tasks.jobs.muting import mute_historical_findings
from tasks.jobs.report import generate_threatscore_report_job
from tasks.jobs.scan import (
    aggregate_findings,
@@ -681,3 +682,25 @@ def generate_threatscore_report_task(tenant_id: str, scan_id: str, provider_id:
    return generate_threatscore_report_job(
        tenant_id=tenant_id, scan_id=scan_id, provider_id=provider_id
    )


@shared_task(name="findings-mute-historical")
def mute_historical_findings_task(tenant_id: str, mute_rule_id: str):
    """
    Background task to mute all historical findings matching a mute rule.

    This task processes findings in batches to avoid memory issues with large datasets.
    It updates the Finding.muted, Finding.muted_at, and Finding.muted_reason fields
    for all findings whose UID is in the mute rule's finding_uids list.

    Args:
        tenant_id (str): The tenant ID for RLS context.
        mute_rule_id (str): The primary key of the MuteRule to apply.

    Returns:
        dict: A dictionary containing:
            - 'findings_muted' (int): Total number of findings muted.
            - 'rule_id' (str): The mute rule ID.
    """
    return mute_historical_findings(tenant_id, mute_rule_id)

@@ -9,6 +9,7 @@ from tasks.jobs.integrations import (
    upload_security_hub_integration,
)

from api.db_router import READ_REPLICA_ALIAS, MainRouter
from api.models import Integration
from api.utils import prowler_integration_connection_test
from prowler.providers.aws.lib.security_hub.security_hub import SecurityHubConnection
@@ -880,7 +881,8 @@ class TestSecurityHubIntegrationUploads:
        # Verify RLS transaction was used correctly
        # Should be called twice: once for getting provider info, once for resetting regions
        assert mock_rls.call_count == 2
        mock_rls.assert_any_call(tenant_id)
        mock_rls.assert_any_call(tenant_id, using=READ_REPLICA_ALIAS)
        mock_rls.assert_any_call(tenant_id, using=MainRouter.default_db)

        # Verify test_connection was called with integration credentials (not provider's)
        mock_test_connection.assert_called_once_with(
@@ -0,0 +1,532 @@
from datetime import datetime, timezone
from uuid import uuid4

import pytest
from django.core.exceptions import ObjectDoesNotExist
from tasks.jobs.muting import mute_historical_findings

from api.models import Finding, MuteRule
from prowler.lib.check.models import Severity
from prowler.lib.outputs.finding import Status


@pytest.mark.django_db
class TestMuteHistoricalFindings:
    """
    Test suite for the mute_historical_findings function.

    This class tests the batch processing of findings to update their muted status
    based on MuteRule criteria.
    """

    @pytest.fixture(scope="function")
    def test_user(self, create_test_user):
        """Create a test user for mute rule creation."""
        return create_test_user

    @pytest.fixture(scope="function")
    def mute_rule_with_findings(self, tenants_fixture, findings_fixture, test_user):
        """
        Create a mute rule that targets the first finding in the fixture.
        """
        tenant = tenants_fixture[0]
        finding = findings_fixture[0]

        mute_rule = MuteRule.objects.create(
            tenant_id=tenant.id,
            name="Test Mute Rule",
            reason="Testing mute functionality",
            enabled=True,
            created_by=test_user,
            finding_uids=[finding.uid],
        )

        return mute_rule

    @pytest.fixture(scope="function")
    def mute_rule_multiple_findings(self, scans_fixture, test_user):
        """
        Create multiple unmuted findings and a mute rule targeting all of them.
        """
        scan = scans_fixture[0]
        tenant_id = scan.tenant_id

        # Create 5 unmuted findings
        finding_uids = []
        for i in range(5):
            finding = Finding.objects.create(
                tenant_id=tenant_id,
                uid=f"test_finding_uid_mute_{i}",
                scan=scan,
                status=Status.FAIL,
                status_extended=f"Test status {i}",
                impact=Severity.high,
                severity=Severity.high,
                raw_result={
                    "status": Status.FAIL,
                    "impact": Severity.high,
                    "severity": Severity.high,
                },
                check_id=f"test_check_id_{i}",
                check_metadata={
                    "CheckId": f"test_check_id_{i}",
                    "Description": f"Test description {i}",
                },
                muted=False,
            )
            finding_uids.append(finding.uid)

        # Create mute rule targeting all findings
        mute_rule = MuteRule.objects.create(
            tenant_id=tenant_id,
            name="Test Multiple Findings Mute Rule",
            reason="Testing batch muting",
            enabled=True,
            created_by=test_user,
            finding_uids=finding_uids,
        )

        return mute_rule, finding_uids

    @pytest.fixture(scope="function")
    def mute_rule_already_muted(self, findings_fixture, test_user):
        """
        Create a mute rule that targets an already-muted finding.
        """
        tenant_id = findings_fixture[1].tenant_id
        already_muted_finding = findings_fixture[1]

        mute_rule = MuteRule.objects.create(
            tenant_id=tenant_id,
            name="Test Already Muted Rule",
            reason="Testing already muted findings",
            enabled=True,
            created_by=test_user,
            finding_uids=[already_muted_finding.uid],
        )

        return mute_rule

@pytest.fixture(scope="function")
|
||||
def mute_rule_mixed_findings(self, scans_fixture, test_user):
|
||||
"""
|
||||
Create a mute rule with a mix of muted and unmuted findings.
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
tenant_id = scan.tenant_id
|
||||
|
||||
# Create 3 unmuted findings
|
||||
unmuted_uids = []
|
||||
for i in range(3):
|
||||
finding = Finding.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
uid=f"unmuted_finding_{i}",
|
||||
scan=scan,
|
||||
status=Status.FAIL,
|
||||
status_extended=f"Unmuted status {i}",
|
||||
impact=Severity.medium,
|
||||
severity=Severity.medium,
|
||||
raw_result={
|
||||
"status": Status.FAIL,
|
||||
"impact": Severity.medium,
|
||||
"severity": Severity.medium,
|
||||
},
|
||||
check_id=f"unmuted_check_{i}",
|
||||
check_metadata={
|
||||
"CheckId": f"unmuted_check_{i}",
|
||||
"Description": f"Unmuted description {i}",
|
||||
},
|
||||
muted=False,
|
||||
)
|
||||
unmuted_uids.append(finding.uid)
|
||||
|
||||
# Create 2 already muted findings
|
||||
muted_uids = []
|
||||
for i in range(2):
|
||||
finding = Finding.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
uid=f"muted_finding_{i}",
|
||||
scan=scan,
|
||||
status=Status.FAIL,
|
||||
status_extended=f"Muted status {i}",
|
||||
impact=Severity.low,
|
||||
severity=Severity.low,
|
||||
raw_result={
|
||||
"status": Status.FAIL,
|
||||
"impact": Severity.low,
|
||||
"severity": Severity.low,
|
||||
},
|
||||
check_id=f"muted_check_{i}",
|
||||
check_metadata={
|
||||
"CheckId": f"muted_check_{i}",
|
||||
"Description": f"Muted description {i}",
|
||||
},
|
||||
muted=True,
|
||||
muted_at=datetime.now(timezone.utc),
|
||||
muted_reason="Already muted",
|
||||
)
|
||||
muted_uids.append(finding.uid)
|
||||
|
||||
# Create mute rule targeting all findings
|
||||
all_uids = unmuted_uids + muted_uids
|
||||
mute_rule = MuteRule.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
name="Test Mixed Findings Rule",
|
||||
reason="Testing mixed muted/unmuted findings",
|
||||
enabled=True,
|
||||
created_by=test_user,
|
||||
finding_uids=all_uids,
|
||||
)
|
||||
|
||||
return mute_rule, unmuted_uids, muted_uids
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def mute_rule_batch_test(self, scans_fixture, test_user):
|
||||
"""
|
||||
Create enough findings to test batch processing (>1000 for default batch size).
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
tenant_id = scan.tenant_id
|
||||
|
||||
# Create 1500 findings to exceed default batch size of 1000
|
||||
finding_uids = []
|
||||
for i in range(1500):
|
||||
finding = Finding.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
uid=f"batch_test_finding_{i}",
|
||||
scan=scan,
|
||||
status=Status.FAIL,
|
||||
status_extended=f"Batch test status {i}",
|
||||
impact=Severity.critical,
|
||||
severity=Severity.critical,
|
||||
raw_result={
|
||||
"status": Status.FAIL,
|
||||
"impact": Severity.critical,
|
||||
"severity": Severity.critical,
|
||||
},
|
||||
check_id=f"batch_test_check_{i}",
|
||||
check_metadata={
|
||||
"CheckId": f"batch_test_check_{i}",
|
||||
"Description": f"Batch test description {i}",
|
||||
},
|
||||
muted=False,
|
||||
)
|
||||
finding_uids.append(finding.uid)
|
||||
|
||||
# Create mute rule targeting all findings
|
||||
mute_rule = MuteRule.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
name="Test Batch Processing Rule",
|
||||
reason="Testing batch processing functionality",
|
||||
enabled=True,
|
||||
created_by=test_user,
|
||||
finding_uids=finding_uids,
|
||||
)
|
||||
|
||||
return mute_rule, finding_uids
|
||||
|
||||
def test_mute_historical_findings_single_finding(
|
||||
self, mute_rule_with_findings, findings_fixture
|
||||
):
|
||||
"""
|
||||
Test muting a single historical finding.
|
||||
"""
|
||||
mute_rule = mute_rule_with_findings
|
||||
tenant_id = str(mute_rule.tenant_id)
|
||||
finding = findings_fixture[0]
|
||||
|
||||
# Ensure the finding is not muted before execution
|
||||
finding.refresh_from_db()
|
||||
assert finding.muted is False
|
||||
assert finding.muted_at is None
|
||||
assert finding.muted_reason is None
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify return value
|
||||
assert result["findings_muted"] == 1
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
|
||||
# Verify the finding was muted
|
||||
finding.refresh_from_db()
|
||||
assert finding.muted is True
|
||||
assert finding.muted_at == mute_rule.inserted_at
|
||||
assert finding.muted_reason == mute_rule.reason
|
||||
|
||||
def test_mute_historical_findings_multiple_findings(
|
||||
self, mute_rule_multiple_findings
|
||||
):
|
||||
"""
|
||||
Test muting multiple historical findings.
|
||||
"""
|
||||
mute_rule, finding_uids = mute_rule_multiple_findings
|
||||
tenant_id = str(mute_rule.tenant_id)
|
||||
|
||||
# Verify all findings are unmuted
|
||||
findings = Finding.objects.filter(tenant_id=tenant_id, uid__in=finding_uids)
|
||||
assert findings.count() == 5
|
||||
for finding in findings:
|
||||
assert finding.muted is False
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify return value
|
||||
assert result["findings_muted"] == 5
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
|
||||
# Verify all findings were muted
|
||||
findings = Finding.objects.filter(tenant_id=tenant_id, uid__in=finding_uids)
|
||||
for finding in findings:
|
||||
assert finding.muted is True
|
||||
assert finding.muted_at == mute_rule.inserted_at
|
||||
assert finding.muted_reason == mute_rule.reason
|
||||
|
||||
def test_mute_historical_findings_already_muted(
|
||||
self, mute_rule_already_muted, findings_fixture
|
||||
):
|
||||
"""
|
||||
Test that already-muted findings are not counted or updated.
|
||||
"""
|
||||
mute_rule = mute_rule_already_muted
|
||||
tenant_id = str(mute_rule.tenant_id)
|
||||
finding = findings_fixture[1]
|
||||
|
||||
# Verify the finding is already muted
|
||||
finding.refresh_from_db()
|
||||
assert finding.muted is True
|
||||
original_muted_at = finding.muted_at
|
||||
original_muted_reason = finding.muted_reason
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify no findings were muted
|
||||
assert result["findings_muted"] == 0
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
|
||||
# Verify the finding's mute status did not change
|
||||
finding.refresh_from_db()
|
||||
assert finding.muted is True
|
||||
assert finding.muted_at == original_muted_at
|
||||
assert finding.muted_reason == original_muted_reason
|
||||
|
||||
def test_mute_historical_findings_mixed_status(self, mute_rule_mixed_findings):
|
||||
"""
|
||||
Test muting when some findings are already muted and others are not.
|
||||
"""
|
||||
mute_rule, unmuted_uids, muted_uids = mute_rule_mixed_findings
|
||||
tenant_id = str(mute_rule.tenant_id)
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify only unmuted findings were counted
|
||||
assert result["findings_muted"] == 3
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
|
||||
# Verify unmuted findings are now muted
|
||||
unmuted_findings = Finding.objects.filter(
|
||||
tenant_id=tenant_id, uid__in=unmuted_uids
|
||||
)
|
||||
for finding in unmuted_findings:
|
||||
assert finding.muted is True
|
||||
assert finding.muted_at == mute_rule.inserted_at
|
||||
assert finding.muted_reason == mute_rule.reason
|
||||
|
||||
# Verify already-muted findings remained unchanged
|
||||
already_muted_findings = Finding.objects.filter(
|
||||
tenant_id=tenant_id, uid__in=muted_uids
|
||||
)
|
||||
for finding in already_muted_findings:
|
||||
assert finding.muted is True
|
||||
assert finding.muted_reason == "Already muted"
|
||||
|
||||
def test_mute_historical_findings_nonexistent_rule(self, tenants_fixture):
|
||||
"""
|
||||
Test that a nonexistent mute rule raises ObjectDoesNotExist.
|
||||
"""
|
||||
tenant_id = str(tenants_fixture[0].id)
|
||||
nonexistent_rule_id = str(uuid4())
|
||||
|
||||
with pytest.raises(ObjectDoesNotExist):
|
||||
mute_historical_findings(tenant_id, nonexistent_rule_id)
|
||||
|
||||
def test_mute_historical_findings_no_matching_findings(
|
||||
self, tenants_fixture, test_user
|
||||
):
|
||||
"""
|
||||
Test muting when no findings match the rule's UIDs.
|
||||
"""
|
||||
tenant_id = str(tenants_fixture[0].id)
|
||||
|
||||
# Create a mute rule with non-existent finding UIDs
|
||||
mute_rule = MuteRule.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
name="Test No Match Rule",
|
||||
reason="Testing no matching findings",
|
||||
enabled=True,
|
||||
created_by=test_user,
|
||||
finding_uids=[
|
||||
"nonexistent_uid_1",
|
||||
"nonexistent_uid_2",
|
||||
"nonexistent_uid_3",
|
||||
],
|
||||
)
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify no findings were muted
|
||||
assert result["findings_muted"] == 0
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
|
||||
def test_mute_historical_findings_batch_processing(self, mute_rule_batch_test):
|
||||
"""
|
||||
Test that large numbers of findings are processed in batches correctly.
|
||||
"""
|
||||
mute_rule, finding_uids = mute_rule_batch_test
|
||||
tenant_id = str(mute_rule.tenant_id)
|
||||
|
||||
# Verify all findings exist and are unmuted
|
||||
findings = Finding.objects.filter(tenant_id=tenant_id, uid__in=finding_uids)
|
||||
assert findings.count() == 1500
|
||||
for finding in findings:
|
||||
assert finding.muted is False
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify return value
|
||||
assert result["findings_muted"] == 1500
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
|
||||
# Verify all findings were muted
|
||||
findings = Finding.objects.filter(tenant_id=tenant_id, uid__in=finding_uids)
|
||||
for finding in findings:
|
||||
assert finding.muted is True
|
||||
assert finding.muted_at == mute_rule.inserted_at
|
||||
assert finding.muted_reason == mute_rule.reason
|
||||
|
||||
def test_mute_historical_findings_preserves_muted_at_timestamp(
|
||||
self, mute_rule_with_findings, findings_fixture
|
||||
):
|
||||
"""
|
||||
Test that muted_at is set to the rule's inserted_at, not the current time.
|
||||
"""
|
||||
mute_rule = mute_rule_with_findings
|
||||
tenant_id = str(mute_rule.tenant_id)
|
||||
finding = findings_fixture[0]
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify the finding was muted
|
||||
assert result["findings_muted"] == 1
|
||||
|
||||
# Verify muted_at matches the rule's inserted_at timestamp
|
||||
finding.refresh_from_db()
|
||||
assert finding.muted_at == mute_rule.inserted_at
|
||||
assert finding.muted_at is not None
|
||||
|
||||
def test_mute_historical_findings_partial_match(self, scans_fixture, test_user):
|
||||
"""
|
||||
Test muting when only some of the rule's UIDs exist as findings.
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
tenant_id = str(scan.tenant_id)
|
||||
|
||||
# Create 3 findings
|
||||
existing_uids = []
|
||||
for i in range(3):
|
||||
finding = Finding.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
uid=f"partial_match_finding_{i}",
|
||||
scan=scan,
|
||||
status=Status.FAIL,
|
||||
status_extended=f"Partial match status {i}",
|
||||
impact=Severity.high,
|
||||
severity=Severity.high,
|
||||
raw_result={
|
||||
"status": Status.FAIL,
|
||||
"impact": Severity.high,
|
||||
"severity": Severity.high,
|
||||
},
|
||||
check_id=f"partial_match_check_{i}",
|
||||
check_metadata={
|
||||
"CheckId": f"partial_match_check_{i}",
|
||||
"Description": f"Partial match description {i}",
|
||||
},
|
||||
muted=False,
|
||||
)
|
||||
existing_uids.append(finding.uid)
|
||||
|
||||
# Create a mute rule with both existing and non-existing UIDs
|
||||
all_uids = existing_uids + [
|
||||
"nonexistent_uid_1",
|
||||
"nonexistent_uid_2",
|
||||
]
|
||||
mute_rule = MuteRule.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
name="Test Partial Match Rule",
|
||||
reason="Testing partial matching",
|
||||
enabled=True,
|
||||
created_by=test_user,
|
||||
finding_uids=all_uids,
|
||||
)
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify only existing findings were muted
|
||||
assert result["findings_muted"] == 3
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
|
||||
# Verify the existing findings were muted
|
||||
findings = Finding.objects.filter(tenant_id=tenant_id, uid__in=existing_uids)
|
||||
assert findings.count() == 3
|
||||
for finding in findings:
|
||||
assert finding.muted is True
|
||||
assert finding.muted_at == mute_rule.inserted_at
|
||||
assert finding.muted_reason == mute_rule.reason
|
||||
|
||||
def test_mute_historical_findings_empty_uids(self, tenants_fixture, test_user):
|
||||
"""
|
||||
Test muting when the rule has an empty finding_uids array.
|
||||
"""
|
||||
tenant_id = str(tenants_fixture[0].id)
|
||||
|
||||
# Create a mute rule with empty finding_uids
|
||||
mute_rule = MuteRule.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
name="Test Empty UIDs Rule",
|
||||
reason="Testing empty UIDs",
|
||||
enabled=True,
|
||||
created_by=test_user,
|
||||
finding_uids=[],
|
||||
)
|
||||
|
||||
# Execute the muting function
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify no findings were muted
|
||||
assert result["findings_muted"] == 0
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
|
||||
def test_mute_historical_findings_return_format(self, mute_rule_with_findings):
|
||||
"""
|
||||
Test that the return value has the correct format and fields.
|
||||
"""
|
||||
mute_rule = mute_rule_with_findings
|
||||
tenant_id = str(mute_rule.tenant_id)
|
||||
|
||||
result = mute_historical_findings(tenant_id, str(mute_rule.id))
|
||||
|
||||
# Verify return value structure
|
||||
assert isinstance(result, dict)
|
||||
assert "findings_muted" in result
|
||||
assert "rule_id" in result
|
||||
assert isinstance(result["findings_muted"], int)
|
||||
assert isinstance(result["rule_id"], str)
|
||||
assert result["rule_id"] == str(mute_rule.id)
|
||||
@@ -18,7 +18,15 @@ from tasks.utils import CustomEncoder

from api.db_router import MainRouter
from api.exceptions import ProviderConnectionError
from api.models import Finding, Provider, Resource, Scan, StateChoices, StatusChoices
from api.models import (
    Finding,
    MuteRule,
    Provider,
    Resource,
    Scan,
    StateChoices,
    StatusChoices,
)
from prowler.lib.check.models import Severity


@@ -739,6 +747,561 @@ class TestPerformScan:
        # Assert that failed_findings_count was reset to 0 during the scan
        assert resource.failed_findings_count == 0

    def test_perform_prowler_scan_with_active_mute_rules(
        self,
        tenants_fixture,
        scans_fixture,
        providers_fixture,
    ):
        """Test active MuteRule mutes findings with correct reason"""
        with (
            patch("api.db_utils.rls_transaction"),
            patch(
                "tasks.jobs.scan.initialize_prowler_provider"
            ) as mock_initialize_prowler_provider,
            patch("tasks.jobs.scan.ProwlerScan") as mock_prowler_scan_class,
            patch(
                "tasks.jobs.scan.PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE",
                new_callable=dict,
            ),
            patch("api.compliance.PROWLER_CHECKS", new_callable=dict),
        ):
            tenant = tenants_fixture[0]
            scan = scans_fixture[0]
            provider = providers_fixture[0]

            provider.provider = Provider.ProviderChoices.AWS
            provider.save()

            tenant_id = str(tenant.id)
            scan_id = str(scan.id)
            provider_id = str(provider.id)

            # Create active MuteRule with specific finding UIDs
            mute_rule_reason = "Accepted risk - production exception"
            finding_uid_1 = "finding_to_mute_1"
            finding_uid_2 = "finding_to_mute_2"

            MuteRule.objects.create(
                tenant_id=tenant_id,
                name="Production Exception Rule",
                reason=mute_rule_reason,
                enabled=True,
                finding_uids=[finding_uid_1, finding_uid_2],
            )

            # Mock findings: one FAIL and one PASS, both should be muted
            muted_fail_finding = MagicMock()
            muted_fail_finding.uid = finding_uid_1
            muted_fail_finding.status = StatusChoices.FAIL
            muted_fail_finding.status_extended = "muted fail"
            muted_fail_finding.severity = Severity.high
            muted_fail_finding.check_id = "muted_fail_check"
            muted_fail_finding.get_metadata.return_value = {"key": "value"}
            muted_fail_finding.resource_uid = "resource_uid_1"
            muted_fail_finding.resource_name = "resource_1"
            muted_fail_finding.region = "us-east-1"
            muted_fail_finding.service_name = "ec2"
            muted_fail_finding.resource_type = "instance"
            muted_fail_finding.resource_tags = {}
            muted_fail_finding.muted = False
            muted_fail_finding.raw = {}
            muted_fail_finding.resource_metadata = {}
            muted_fail_finding.resource_details = {}
            muted_fail_finding.partition = "aws"
            muted_fail_finding.compliance = {}

            muted_pass_finding = MagicMock()
            muted_pass_finding.uid = finding_uid_2
            muted_pass_finding.status = StatusChoices.PASS
            muted_pass_finding.status_extended = "muted pass"
            muted_pass_finding.severity = Severity.medium
            muted_pass_finding.check_id = "muted_pass_check"
            muted_pass_finding.get_metadata.return_value = {"key": "value"}
            muted_pass_finding.resource_uid = "resource_uid_2"
            muted_pass_finding.resource_name = "resource_2"
            muted_pass_finding.region = "us-east-1"
            muted_pass_finding.service_name = "s3"
            muted_pass_finding.resource_type = "bucket"
            muted_pass_finding.resource_tags = {}
            muted_pass_finding.muted = False
            muted_pass_finding.raw = {}
            muted_pass_finding.resource_metadata = {}
            muted_pass_finding.resource_details = {}
            muted_pass_finding.partition = "aws"
            muted_pass_finding.compliance = {}

            # Mock the ProwlerScan instance
            mock_prowler_scan_instance = MagicMock()
            mock_prowler_scan_instance.scan.return_value = [
                (100, [muted_fail_finding, muted_pass_finding])
            ]
            mock_prowler_scan_class.return_value = mock_prowler_scan_instance

            # Mock prowler_provider
            mock_prowler_provider_instance = MagicMock()
            mock_prowler_provider_instance.get_regions.return_value = ["us-east-1"]
            mock_initialize_prowler_provider.return_value = (
                mock_prowler_provider_instance
            )

            # Call the function under test
            perform_prowler_scan(tenant_id, scan_id, provider_id, [])

            # Verify findings are muted with correct reason
            fail_finding_db = Finding.objects.get(uid=finding_uid_1)
            pass_finding_db = Finding.objects.get(uid=finding_uid_2)

            assert fail_finding_db.muted
            assert fail_finding_db.muted_reason == mute_rule_reason
            assert fail_finding_db.muted_at is not None

            assert pass_finding_db.muted
            assert pass_finding_db.muted_reason == mute_rule_reason
            assert pass_finding_db.muted_at is not None

            # Verify failed_findings_count is 0 for muted FAIL finding
            resource_1 = Resource.objects.get(uid="resource_uid_1")
            assert resource_1.failed_findings_count == 0

    def test_perform_prowler_scan_with_inactive_mute_rules(
        self,
        tenants_fixture,
        scans_fixture,
        providers_fixture,
    ):
        """Test inactive MuteRule does not mute findings"""
        with (
            patch("api.db_utils.rls_transaction"),
            patch(
                "tasks.jobs.scan.initialize_prowler_provider"
            ) as mock_initialize_prowler_provider,
            patch("tasks.jobs.scan.ProwlerScan") as mock_prowler_scan_class,
            patch(
                "tasks.jobs.scan.PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE",
                new_callable=dict,
            ),
            patch("api.compliance.PROWLER_CHECKS", new_callable=dict),
        ):
            tenant = tenants_fixture[0]
            scan = scans_fixture[0]
            provider = providers_fixture[0]

            provider.provider = Provider.ProviderChoices.AWS
            provider.save()

            tenant_id = str(tenant.id)
            scan_id = str(scan.id)
            provider_id = str(provider.id)

            # Create inactive MuteRule
            finding_uid = "finding_inactive_rule"
            MuteRule.objects.create(
                tenant_id=tenant_id,
                name="Inactive Rule",
                reason="Should not apply",
                enabled=False,
                finding_uids=[finding_uid],
            )

            # Mock FAIL finding
            fail_finding = MagicMock()
            fail_finding.uid = finding_uid
            fail_finding.status = StatusChoices.FAIL
            fail_finding.status_extended = "test fail"
            fail_finding.severity = Severity.high
            fail_finding.check_id = "fail_check"
            fail_finding.get_metadata.return_value = {"key": "value"}
            fail_finding.resource_uid = "resource_uid_inactive"
            fail_finding.resource_name = "resource_inactive"
            fail_finding.region = "us-east-1"
            fail_finding.service_name = "ec2"
            fail_finding.resource_type = "instance"
            fail_finding.resource_tags = {}
            fail_finding.muted = False
            fail_finding.raw = {}
            fail_finding.resource_metadata = {}
            fail_finding.resource_details = {}
            fail_finding.partition = "aws"
            fail_finding.compliance = {}

            # Mock the ProwlerScan instance
            mock_prowler_scan_instance = MagicMock()
            mock_prowler_scan_instance.scan.return_value = [(100, [fail_finding])]
            mock_prowler_scan_class.return_value = mock_prowler_scan_instance

            # Mock prowler_provider
            mock_prowler_provider_instance = MagicMock()
            mock_prowler_provider_instance.get_regions.return_value = ["us-east-1"]
            mock_initialize_prowler_provider.return_value = (
                mock_prowler_provider_instance
            )

            # Call the function under test
            perform_prowler_scan(tenant_id, scan_id, provider_id, [])

            # Verify finding is NOT muted
            finding_db = Finding.objects.get(uid=finding_uid)
            assert not finding_db.muted
            assert finding_db.muted_reason is None
            assert finding_db.muted_at is None

            # Verify failed_findings_count increments for FAIL finding
            resource = Resource.objects.get(uid="resource_uid_inactive")
            assert resource.failed_findings_count == 1

    def test_perform_prowler_scan_mutelist_overrides_mute_rules(
        self,
        tenants_fixture,
        scans_fixture,
        providers_fixture,
    ):
        """Test mutelist processor takes precedence over MuteRule"""
        with (
            patch("api.db_utils.rls_transaction"),
            patch(
                "tasks.jobs.scan.initialize_prowler_provider"
            ) as mock_initialize_prowler_provider,
            patch("tasks.jobs.scan.ProwlerScan") as mock_prowler_scan_class,
            patch(
                "tasks.jobs.scan.PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE",
                new_callable=dict,
            ),
            patch("api.compliance.PROWLER_CHECKS", new_callable=dict),
        ):
            tenant = tenants_fixture[0]
            scan = scans_fixture[0]
            provider = providers_fixture[0]

            provider.provider = Provider.ProviderChoices.AWS
            provider.save()

            tenant_id = str(tenant.id)
            scan_id = str(scan.id)
            provider_id = str(provider.id)

            # Create active MuteRule
            finding_uid = "finding_both_rules"
            MuteRule.objects.create(
                tenant_id=tenant_id,
                name="Manual Mute Rule",
                reason="Muted by manual rule",
                enabled=True,
                finding_uids=[finding_uid],
            )

            # Mock finding with mutelist processor muted=True
            muted_finding = MagicMock()
            muted_finding.uid = finding_uid
            muted_finding.status = StatusChoices.FAIL
            muted_finding.status_extended = "test"
            muted_finding.severity = Severity.high
            muted_finding.check_id = "test_check"
            muted_finding.get_metadata.return_value = {"key": "value"}
            muted_finding.resource_uid = "resource_both"
            muted_finding.resource_name = "resource_both"
            muted_finding.region = "us-east-1"
            muted_finding.service_name = "ec2"
            muted_finding.resource_type = "instance"
            muted_finding.resource_tags = {}
            muted_finding.muted = True
            muted_finding.raw = {}
            muted_finding.resource_metadata = {}
            muted_finding.resource_details = {}
            muted_finding.partition = "aws"
            muted_finding.compliance = {}

            # Mock the ProwlerScan instance
            mock_prowler_scan_instance = MagicMock()
            mock_prowler_scan_instance.scan.return_value = [(100, [muted_finding])]
            mock_prowler_scan_class.return_value = mock_prowler_scan_instance

            # Mock prowler_provider
            mock_prowler_provider_instance = MagicMock()
            mock_prowler_provider_instance.get_regions.return_value = ["us-east-1"]
            mock_initialize_prowler_provider.return_value = (
                mock_prowler_provider_instance
            )

            # Call the function under test
            perform_prowler_scan(tenant_id, scan_id, provider_id, [])

            # Verify mutelist reason takes precedence
            finding_db = Finding.objects.get(uid=finding_uid)
            assert finding_db.muted
            assert finding_db.muted_reason == "Muted by mutelist"
            assert finding_db.muted_at is not None

            # Verify failed_findings_count is 0
            resource = Resource.objects.get(uid="resource_both")
            assert resource.failed_findings_count == 0

    def test_perform_prowler_scan_mute_rules_multiple_findings(
        self,
        tenants_fixture,
        scans_fixture,
        providers_fixture,
    ):
        """Test MuteRule with multiple finding UIDs mutes all findings"""
        with (
            patch("api.db_utils.rls_transaction"),
            patch(
                "tasks.jobs.scan.initialize_prowler_provider"
            ) as mock_initialize_prowler_provider,
            patch("tasks.jobs.scan.ProwlerScan") as mock_prowler_scan_class,
            patch(
                "tasks.jobs.scan.PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE",
                new_callable=dict,
            ),
            patch("api.compliance.PROWLER_CHECKS", new_callable=dict),
        ):
            tenant = tenants_fixture[0]
            scan = scans_fixture[0]
            provider = providers_fixture[0]

            provider.provider = Provider.ProviderChoices.AWS
            provider.save()

            tenant_id = str(tenant.id)
            scan_id = str(scan.id)
            provider_id = str(provider.id)

            # Create MuteRule with multiple finding UIDs
            mute_rule_reason = "Bulk exception for dev environment"
            finding_uids = [
                "bulk_finding_1",
                "bulk_finding_2",
                "bulk_finding_3",
                "bulk_finding_4",
            ]
            MuteRule.objects.create(
                tenant_id=tenant_id,
                name="Bulk Mute Rule",
                reason=mute_rule_reason,
                enabled=True,
                finding_uids=finding_uids,
            )

            # Mock multiple findings with mixed statuses
            findings = []
            for i, uid in enumerate(finding_uids):
                finding = MagicMock()
                finding.uid = uid
                finding.status = (
                    StatusChoices.FAIL if i % 2 == 0 else StatusChoices.PASS
                )
                finding.status_extended = f"test {i}"
                finding.severity = Severity.medium
                finding.check_id = f"check_{i}"
                finding.get_metadata.return_value = {"key": f"value_{i}"}
                finding.resource_uid = f"resource_bulk_{i}"
                finding.resource_name = f"resource_{i}"
                finding.region = "us-west-2"
                finding.service_name = "lambda"
                finding.resource_type = "function"
                finding.resource_tags = {}
                finding.muted = False
                finding.raw = {}
                finding.resource_metadata = {}
                finding.resource_details = {}
                finding.partition = "aws"
                finding.compliance = {}
                findings.append(finding)

            # Mock the ProwlerScan instance
            mock_prowler_scan_instance = MagicMock()
            mock_prowler_scan_instance.scan.return_value = [(100, findings)]
            mock_prowler_scan_class.return_value = mock_prowler_scan_instance

            # Mock prowler_provider
            mock_prowler_provider_instance = MagicMock()
            mock_prowler_provider_instance.get_regions.return_value = ["us-west-2"]
            mock_initialize_prowler_provider.return_value = (
                mock_prowler_provider_instance
            )

            # Call the function under test
            perform_prowler_scan(tenant_id, scan_id, provider_id, [])

            # Verify all findings are muted with same reason
            for uid in finding_uids:
                finding_db = Finding.objects.get(uid=uid)
                assert finding_db.muted
                assert finding_db.muted_reason == mute_rule_reason
                assert finding_db.muted_at is not None

            # Verify all resources have failed_findings_count = 0
            for i in range(len(finding_uids)):
                resource = Resource.objects.get(uid=f"resource_bulk_{i}")
                assert resource.failed_findings_count == 0

    def test_perform_prowler_scan_mute_rules_error_handling(
        self,
        tenants_fixture,
        scans_fixture,
        providers_fixture,
    ):
        """Test scan continues when MuteRule loading fails"""
        with (
            patch("api.db_utils.rls_transaction"),
            patch(
                "tasks.jobs.scan.initialize_prowler_provider"
            ) as mock_initialize_prowler_provider,
            patch("tasks.jobs.scan.ProwlerScan") as mock_prowler_scan_class,
            patch(
                "tasks.jobs.scan.PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE",
                new_callable=dict,
            ),
            patch("api.compliance.PROWLER_CHECKS", new_callable=dict),
            patch("api.models.MuteRule.objects.filter") as mock_mute_rule_filter,
        ):
            tenant = tenants_fixture[0]
            scan = scans_fixture[0]
            provider = providers_fixture[0]

            provider.provider = Provider.ProviderChoices.AWS
            provider.save()

            tenant_id = str(tenant.id)
            scan_id = str(scan.id)
            provider_id = str(provider.id)

            # Mock MuteRule.objects.filter to raise exception
            mock_mute_rule_filter.side_effect = Exception("Database error")

            # Mock finding
            finding = MagicMock()
            finding.uid = "finding_error_handling"
            finding.status = StatusChoices.FAIL
            finding.status_extended = "test"
            finding.severity = Severity.high
            finding.check_id = "test_check"
            finding.get_metadata.return_value = {"key": "value"}
            finding.resource_uid = "resource_error"
            finding.resource_name = "resource_error"
            finding.region = "us-east-1"
            finding.service_name = "ec2"
            finding.resource_type = "instance"
            finding.resource_tags = {}
            finding.muted = False
            finding.raw = {}
            finding.resource_metadata = {}
            finding.resource_details = {}
            finding.partition = "aws"
            finding.compliance = {}

            # Mock the ProwlerScan instance
            mock_prowler_scan_instance = MagicMock()
            mock_prowler_scan_instance.scan.return_value = [(100, [finding])]
            mock_prowler_scan_class.return_value = mock_prowler_scan_instance

            # Mock prowler_provider
            mock_prowler_provider_instance = MagicMock()
            mock_prowler_provider_instance.get_regions.return_value = ["us-east-1"]
            mock_initialize_prowler_provider.return_value = (
                mock_prowler_provider_instance
            )

            # Call the function under test - should not raise
            perform_prowler_scan(tenant_id, scan_id, provider_id, [])

            # Verify scan completed successfully
            scan.refresh_from_db()
            assert scan.state == StateChoices.COMPLETED

            # Verify finding is not muted (mute_rules_cache was empty dict)
            finding_db = Finding.objects.get(uid="finding_error_handling")
            assert not finding_db.muted
            assert finding_db.muted_reason is None

            # Verify failed_findings_count increments
            resource = Resource.objects.get(uid="resource_error")
            assert resource.failed_findings_count == 1

    def test_perform_prowler_scan_muted_at_timestamp(
        self,
        tenants_fixture,
        scans_fixture,
        providers_fixture,
    ):
        """Test muted_at timestamp is set correctly for muted findings"""
        with (
            patch("api.db_utils.rls_transaction"),
            patch(
                "tasks.jobs.scan.initialize_prowler_provider"
            ) as mock_initialize_prowler_provider,
            patch("tasks.jobs.scan.ProwlerScan") as mock_prowler_scan_class,
            patch(
                "tasks.jobs.scan.PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE",
                new_callable=dict,
            ),
            patch("api.compliance.PROWLER_CHECKS", new_callable=dict),
        ):
            tenant = tenants_fixture[0]
            scan = scans_fixture[0]
            provider = providers_fixture[0]

            provider.provider = Provider.ProviderChoices.AWS
            provider.save()

            tenant_id = str(tenant.id)
            scan_id = str(scan.id)
            provider_id = str(provider.id)

            # Create active MuteRule
            finding_uid = "finding_timestamp_test"
            MuteRule.objects.create(
                tenant_id=tenant_id,
                name="Timestamp Test Rule",
                reason="Testing timestamp",
                enabled=True,
                finding_uids=[finding_uid],
            )

            # Mock finding
            finding = MagicMock()
            finding.uid = finding_uid
            finding.status = StatusChoices.FAIL
            finding.status_extended = "test"
            finding.severity = Severity.high
            finding.check_id = "test_check"
            finding.get_metadata.return_value = {"key": "value"}
            finding.resource_uid = "resource_timestamp"
            finding.resource_name = "resource_timestamp"
            finding.region = "us-east-1"
            finding.service_name = "ec2"
            finding.resource_type = "instance"
            finding.resource_tags = {}
            finding.muted = False
            finding.raw = {}
            finding.resource_metadata = {}
            finding.resource_details = {}
            finding.partition = "aws"
            finding.compliance = {}

            # Mock the ProwlerScan instance
            mock_prowler_scan_instance = MagicMock()
            mock_prowler_scan_instance.scan.return_value = [(100, [finding])]
            mock_prowler_scan_class.return_value = mock_prowler_scan_instance

            # Mock prowler_provider
            mock_prowler_provider_instance = MagicMock()
            mock_prowler_provider_instance.get_regions.return_value = ["us-east-1"]
            mock_initialize_prowler_provider.return_value = (
                mock_prowler_provider_instance
            )

            # Capture time before and after scan
            before_scan = datetime.now(timezone.utc)
            perform_prowler_scan(tenant_id, scan_id, provider_id, [])
            after_scan = datetime.now(timezone.utc)

            # Verify muted_at is within the scan time window
            finding_db = Finding.objects.get(uid=finding_uid)
            assert finding_db.muted
            assert finding_db.muted_at is not None
            assert before_scan <= finding_db.muted_at <= after_scan


# TODO Add tests for aggregations

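Taken together, these tests suggest the scan builds an in-memory cache of enabled mute rules and degrades gracefully when that query fails. A hedged sketch of that pattern follows; names such as `mute_rules_cache` are inferred from the test comments, not copied from the implementation.

```python
# Illustrative sketch, not the actual tasks/jobs/scan.py code.
mute_rules_cache: dict[str, str] = {}
try:
    # Map each targeted finding UID to its rule's reason.
    for rule in MuteRule.objects.filter(tenant_id=tenant_id, enabled=True):
        for uid in rule.finding_uids:
            mute_rules_cache[uid] = rule.reason
except Exception as error:
    # A failed load must not abort the scan; findings simply stay unmuted.
    logger.warning(f"Failed to load mute rules for tenant {tenant_id}: {error}")

# Per finding: a mutelist verdict wins, then manual mute rules are consulted.
if finding.muted:
    muted_reason = "Muted by mutelist"
else:
    muted_reason = mute_rules_cache.get(finding.uid)
```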
@@ -1,16 +1,24 @@
import uuid
from unittest.mock import MagicMock, patch

import openai
import pytest
from botocore.exceptions import ClientError
from tasks.tasks import (
    _perform_scan_complete_tasks,
    check_integrations_task,
    check_lighthouse_provider_connection_task,
    generate_outputs_task,
    refresh_lighthouse_provider_models_task,
    s3_integration_task,
    security_hub_integration_task,
)

from api.models import Integration
from api.models import (
    Integration,
    LighthouseProviderConfiguration,
    LighthouseProviderModels,
)


# TODO Move this to outputs/reports jobs
@@ -1097,3 +1105,363 @@ class TestCheckIntegrationsTask:

        assert result is False
        mock_upload.assert_called_once_with(self.tenant_id, self.provider_id, scan_id)


@pytest.mark.django_db
class TestCheckLighthouseProviderConnectionTask:
    def setup_method(self):
        self.tenant_id = str(uuid.uuid4())

    @pytest.mark.parametrize(
        "provider_type,credentials,base_url,expected_result",
        [
            (
                LighthouseProviderConfiguration.LLMProviderChoices.OPENAI,
                {"api_key": "sk-test123"},
                None,
                {"connected": True, "error": None},
            ),
            (
                LighthouseProviderConfiguration.LLMProviderChoices.OPENAI_COMPATIBLE,
                {"api_key": "sk-test123"},
                "https://openrouter.ai/api/v1",
                {"connected": True, "error": None},
            ),
            (
                LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
                {
                    "access_key_id": "AKIA123",
                    "secret_access_key": "secret",
                    "region": "us-east-1",
                },
                None,
                {"connected": True, "error": None},
            ),
        ],
    )
    def test_check_connection_success_all_providers(
        self, tenants_fixture, provider_type, credentials, base_url, expected_result
    ):
        """Test successful connection check for all provider types."""
        # Create provider configuration
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=provider_type,
            base_url=base_url,
            is_active=False,
        )
        provider_cfg.credentials_decoded = credentials
        provider_cfg.save()

        # Mock the appropriate API calls
        with (
            patch("tasks.jobs.lighthouse_providers.openai.OpenAI") as mock_openai,
            patch("tasks.jobs.lighthouse_providers.boto3.client") as mock_boto3,
        ):
            mock_client = MagicMock()
            mock_client.models.list.return_value = MagicMock()
            mock_client.list_foundation_models.return_value = {}
            mock_openai.return_value = mock_client
            mock_boto3.return_value = mock_client

            # Execute
            result = check_lighthouse_provider_connection_task(
                provider_config_id=str(provider_cfg.id),
                tenant_id=str(tenants_fixture[0].id),
            )

            # Assert
            assert result == expected_result
            provider_cfg.refresh_from_db()
            assert provider_cfg.is_active is True

    @pytest.mark.parametrize(
        "provider_type,credentials,base_url,exception_to_raise",
        [
            (
                LighthouseProviderConfiguration.LLMProviderChoices.OPENAI,
                {"api_key": "sk-invalid"},
                None,
                openai.AuthenticationError(
                    "Invalid API key", response=MagicMock(), body=None
                ),
            ),
            (
                LighthouseProviderConfiguration.LLMProviderChoices.OPENAI_COMPATIBLE,
                {"api_key": "sk-invalid"},
                "https://openrouter.ai/api/v1",
                openai.APIConnectionError(request=MagicMock()),
            ),
            (
                LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
                {
                    "access_key_id": "AKIA123",
                    "secret_access_key": "secret",
                    "region": "us-east-1",
                },
                None,
                ClientError(
                    {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}},
                    "list_foundation_models",
                ),
            ),
        ],
    )
    def test_check_connection_api_failure(
        self,
        tenants_fixture,
        provider_type,
        credentials,
        base_url,
        exception_to_raise,
    ):
        """Test connection check when API calls fail."""
        # Create provider configuration
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=provider_type,
            base_url=base_url,
            is_active=True,
        )
        provider_cfg.credentials_decoded = credentials
        provider_cfg.save()

        # Mock the API to raise exception
        with (
            patch("tasks.jobs.lighthouse_providers.openai.OpenAI") as mock_openai,
            patch("tasks.jobs.lighthouse_providers.boto3.client") as mock_boto3,
        ):
            mock_client = MagicMock()
            if (
                provider_type
                == LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK
            ):
                mock_client.list_foundation_models.side_effect = exception_to_raise
                mock_boto3.return_value = mock_client
            else:
                mock_client.models.list.side_effect = exception_to_raise
                mock_openai.return_value = mock_client

            # Execute
            result = check_lighthouse_provider_connection_task(
                provider_config_id=str(provider_cfg.id),
                tenant_id=str(tenants_fixture[0].id),
            )

            # Assert
            assert result["connected"] is False
            assert result["error"] is not None
            provider_cfg.refresh_from_db()
            assert provider_cfg.is_active is False

    def test_check_connection_updates_active_status(self, tenants_fixture):
        """Test that connection check toggles is_active from True to False on failure."""
        # Create provider with is_active=True
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.OPENAI,
            base_url=None,
            is_active=True,
        )
        provider_cfg.credentials_decoded = {"api_key": "sk-test123"}
        provider_cfg.save()

        # Mock API to fail
        with patch("tasks.jobs.lighthouse_providers.openai.OpenAI") as mock_openai:
            mock_client = MagicMock()
            mock_client.models.list.side_effect = openai.AuthenticationError(
                "Invalid", response=MagicMock(), body=None
            )
            mock_openai.return_value = mock_client

            # Execute
            result = check_lighthouse_provider_connection_task(
                provider_config_id=str(provider_cfg.id),
                tenant_id=str(tenants_fixture[0].id),
            )

            # Assert status changed
            assert result["connected"] is False
            provider_cfg.refresh_from_db()
            assert provider_cfg.is_active is False

    def test_check_connection_provider_does_not_exist(self, tenants_fixture):
        """Test that checking non-existent provider raises DoesNotExist."""
        non_existent_id = str(uuid.uuid4())

        with pytest.raises(LighthouseProviderConfiguration.DoesNotExist):
            check_lighthouse_provider_connection_task(
                provider_config_id=non_existent_id,
                tenant_id=str(tenants_fixture[0].id),
            )


@pytest.mark.django_db
class TestRefreshLighthouseProviderModelsTask:
    def setup_method(self):
        self.tenant_id = str(uuid.uuid4())

    @pytest.mark.parametrize(
        "provider_type,credentials,base_url,mock_models,expected_count",
        [
            (
                LighthouseProviderConfiguration.LLMProviderChoices.OPENAI,
                {"api_key": "sk-test123"},
                None,
                {"gpt-5": "gpt-5", "gpt-4o": "gpt-4o"},
                2,
            ),
            (
                LighthouseProviderConfiguration.LLMProviderChoices.OPENAI_COMPATIBLE,
                {"api_key": "sk-test123"},
                "https://openrouter.ai/api/v1",
                {"model-1": "Model One", "model-2": "Model Two"},
                2,
            ),
            (
                LighthouseProviderConfiguration.LLMProviderChoices.BEDROCK,
                {
                    "access_key_id": "AKIA123",
                    "secret_access_key": "secret",
                    "region": "us-east-1",
                },
                None,
                {"openai.gpt-oss-120b-1:0": "gpt-oss-120b"},
                1,
            ),
        ],
    )
    def test_refresh_models_create_new(
        self,
        tenants_fixture,
        provider_type,
        credentials,
        base_url,
        mock_models,
        expected_count,
    ):
        """Test creating new models for all provider types."""
        # Create provider configuration
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=provider_type,
            base_url=base_url,
            is_active=True,
        )
        provider_cfg.credentials_decoded = credentials
        provider_cfg.save()

        # Mock the fetch functions
        with (
            patch(
                "tasks.jobs.lighthouse_providers._fetch_openai_models",
                return_value=mock_models,
            ),
            patch(
                "tasks.jobs.lighthouse_providers._fetch_openai_compatible_models",
                return_value=mock_models,
            ),
            patch(
                "tasks.jobs.lighthouse_providers._fetch_bedrock_models",
                return_value=mock_models,
            ),
        ):
            # Execute
            result = refresh_lighthouse_provider_models_task(
                provider_config_id=str(provider_cfg.id),
                tenant_id=str(tenants_fixture[0].id),
            )

            # Assert
            assert result["created"] == expected_count
            assert result["updated"] == 0
            assert result["deleted"] == 0
            assert (
                LighthouseProviderModels.objects.filter(
                    provider_configuration=provider_cfg
                ).count()
                == expected_count
            )

    def test_refresh_models_mixed_operations(self, tenants_fixture):
        """Test mixed create, update, and delete operations."""
        # Create provider configuration
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.OPENAI,
            base_url=None,
            is_active=True,
        )
        provider_cfg.credentials_decoded = {"api_key": "sk-test123"}
        provider_cfg.save()

        # Create 2 existing models (A, B)
        LighthouseProviderModels.objects.create(
            tenant_id=tenants_fixture[0].id,
            provider_configuration=provider_cfg,
            model_id="model-a",
            model_name="Model A",
        )
        LighthouseProviderModels.objects.create(
            tenant_id=tenants_fixture[0].id,
            provider_configuration=provider_cfg,
            model_id="model-b",
            model_name="Model B",
        )

        # Mock API to return models B (existing), C (new) - A will be deleted
        mock_models = {"model-b": "Model B", "model-c": "Model C"}
        with patch(
            "tasks.jobs.lighthouse_providers._fetch_openai_models",
            return_value=mock_models,
        ):
            # Execute
            result = refresh_lighthouse_provider_models_task(
                provider_config_id=str(provider_cfg.id),
                tenant_id=str(tenants_fixture[0].id),
            )

            # Assert
            assert result["created"] == 1  # model-c created
            assert result["updated"] == 1  # model-b updated
            assert result["deleted"] == 1  # model-a deleted

            # Verify only B and C exist
            remaining_models = LighthouseProviderModels.objects.filter(
                provider_configuration=provider_cfg
            )
            assert remaining_models.count() == 2
            assert set(remaining_models.values_list("model_id", flat=True)) == {
                "model-b",
                "model-c",
            }

    def test_refresh_models_api_exception(self, tenants_fixture):
        """Test refresh when API raises an exception."""
        # Create provider configuration
        provider_cfg = LighthouseProviderConfiguration(
            tenant_id=tenants_fixture[0].id,
            provider_type=LighthouseProviderConfiguration.LLMProviderChoices.OPENAI,
            base_url=None,
            is_active=True,
        )
        provider_cfg.credentials_decoded = {"api_key": "sk-test123"}
        provider_cfg.save()

        # Mock fetch to raise exception
        with patch(
            "tasks.jobs.lighthouse_providers._fetch_openai_models",
            side_effect=openai.APIError("API Error", request=MagicMock(), body=None),
        ):
            # Execute
            result = refresh_lighthouse_provider_models_task(
                provider_config_id=str(provider_cfg.id),
                tenant_id=str(tenants_fixture[0].id),
            )

            # Assert
            assert result["created"] == 0
            assert result["updated"] == 0
            assert result["deleted"] == 0
            assert "error" in result
            assert result["error"] is not None

@@ -35,7 +35,8 @@ dashboard = dash.Dash(

# Logo
prowler_logo = html.Img(
    src="https://prowler.com/wp-content/uploads/logo-dashboard.png", alt="Prowler Logo"
    src="https://cdn.prod.website-files.com/68c4ec3f9fb7b154fbcb6e36/68ffb46d40ed7faa37a592a5_prowler-logo.png",
    alt="Prowler Logo",
)

menu_icons = {

@@ -0,0 +1,43 @@
import warnings

from dashboard.common_methods import get_section_containers_3_levels

warnings.filterwarnings("ignore")


def get_table(data):
    data["REQUIREMENTS_DESCRIPTION"] = (
        data["REQUIREMENTS_ID"] + " - " + data["REQUIREMENTS_DESCRIPTION"]
    )

    data["REQUIREMENTS_DESCRIPTION"] = data["REQUIREMENTS_DESCRIPTION"].apply(
        lambda x: x[:150] + "..." if len(str(x)) > 150 else x
    )

    data["REQUIREMENTS_ATTRIBUTES_SECTION"] = data[
        "REQUIREMENTS_ATTRIBUTES_SECTION"
    ].apply(lambda x: x[:80] + "..." if len(str(x)) > 80 else x)

    data["REQUIREMENTS_ATTRIBUTES_SUBSECTION"] = data[
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION"
    ].apply(lambda x: x[:150] + "..." if len(str(x)) > 150 else x)

    aux = data[
        [
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ]

    return get_section_containers_3_levels(
        aux,
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
        "REQUIREMENTS_DESCRIPTION",
    )
@@ -125,11 +125,11 @@ Each check **must** populate the `report.status` and `report.status_extended` fields

The severity of each check is defined in the metadata file using the `Severity` field. Severity values are always lowercase and must be one of the predefined categories below.

- `critical` – Issue that must be addressed immediately.
- `high` – Issue that should be addressed as soon as possible.
- `medium` – Issue that should be addressed within a reasonable timeframe.
- `low` – Issue that can be addressed in the future.
- `informational` – Not an issue but provides valuable information.
- `critical` – Highest potential impact with broad exposure that could affect core security boundaries or business operations.
- `high` – Substantial potential impact with significant exposure that could affect important security controls or resources.
- `medium` – Moderate potential impact with limited exposure that weakens defense layers but has contained scope.
- `low` – Minimal potential impact with negligible exposure that represents minor gaps in security posture.
- `informational` – Provides valuable information but does not affect the security posture.

If the check involves multiple scenarios that may alter its severity, adjustments can be made dynamically within the check's logic using the `report.check_metadata.Severity` attribute:

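A hedged illustration of such an in-check adjustment follows; the `bucket` object and its attributes are hypothetical, and only the `report.check_metadata.Severity` attribute and the lowercase severity values come from this guide.

```python
# Hypothetical check logic: escalate severity when the riskier scenario is hit.
report.status = "FAIL"
report.status_extended = f"Bucket {bucket.name} is publicly accessible."
if bucket.allows_anonymous_write:  # hypothetical attribute
    report.check_metadata.Severity = "critical"
else:
    report.check_metadata.Severity = "high"
```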
@@ -4,7 +4,7 @@ title: "Configuration"
|
||||
|
||||
Configure your MCP client to connect to Prowler MCP Server.
|
||||
|
||||
## Step 1: Get Your API Key (Optional)
|
||||
## Step 1: Get Your API Key
|
||||
|
||||
<Note>
|
||||
**Authentication is optional**: Prowler Hub and Prowler Documentation features work without authentication. An API key is only required for Prowler Cloud and Prowler App (Self-Managed) features.
|
||||
@@ -16,21 +16,17 @@ To use Prowler Cloud or Prowler App (Self-Managed) features. To get the API key,
|
||||
Keep the API key secure. Never share it publicly or commit it to version control.
|
||||
</Warning>
|
||||
|
||||
## Step 2: Configure Your MCP Client
|
||||
## Step 2: Configure Your MCP Host/Client
|
||||
|
||||
Choose the configuration based on your deployment:
|
||||
|
||||
- **STDIO Mode**: Local installation only (runs as subprocess).
|
||||
- **HTTP Mode**: Prowler Cloud MCP Server or self-hosted Prowler MCP Server.
|
||||
- **STDIO Mode**: Local installation only (runs as subprocess of your MCP client).
|
||||
|
||||
### HTTP Mode (Prowler Cloud MCP Server or self-hosted Prowler MCP Server)
|
||||
### HTTP Mode
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Native HTTP Support (Cursor, VSCode)">
|
||||
**Clients that support HTTP with custom headers natively**
|
||||
|
||||
For example: Cursor, VSCode, LobeChat, etc.
|
||||
|
||||
<Tab title="Generic Native HTTP Support">
|
||||
**Configuration:**
|
||||
```json
|
||||
{
|
||||
@@ -38,20 +34,15 @@ Choose the configuration based on your deployment:
|
||||
"prowler": {
|
||||
"url": "https://mcp.prowler.com/mcp", // or your self-hosted Prowler MCP Server URL
|
||||
"headers": {
|
||||
"Authorization": "Bearer pk_your_api_key_here"
|
||||
"Authorization": "Bearer <your-api-key-here>"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</Tab>
|
||||
|
||||
<Tab title="Using mcp-remote (Claude Desktop)">
|
||||
**For clients without native HTTP support (like Claude Desktop)**
|
||||
|
||||
For example: Claude Desktop.
|
||||
|
||||
<Tab title="Generic without Native HTTP Support">
|
||||
**Configuration:**
|
||||
```json
|
||||
{
|
||||
@@ -65,26 +56,79 @@ Choose the configuration based on your deployment:
|
||||
"Authorization: Bearer ${PROWLER_APP_API_KEY}"
|
||||
],
|
||||
"env": {
|
||||
"PROWLER_APP_API_KEY": "pk_your_api_key_here"
|
||||
"PROWLER_APP_API_KEY": "<your-api-key-here>"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
<Info>
|
||||
The `mcp-remote` tool acts as a bridge for clients that don't support HTTP natively. Learn more at [mcp-remote on npm](https://www.npmjs.com/package/mcp-remote).
|
||||
</Info>
|
||||
|
||||
</Tab>
|
||||
|
||||
<Tab title="Claude Desktop">
1. Open Claude Desktop settings
2. Go to the "Developer" tab
3. Click the "Edit Config" button
4. Edit the `claude_desktop_config.json` file with your favorite editor
5. Add the following configuration:
```json
{
  "mcpServers": {
    "prowler": {
      "command": "npx",
      "args": [
        "mcp-remote",
        "https://mcp.prowler.com/mcp",
        "--header",
        "Authorization: Bearer ${PROWLER_APP_API_KEY}"
      ],
      "env": {
        "PROWLER_APP_API_KEY": "<your-api-key-here>"
      }
    }
  }
}
```
</Tab>

<Tab title="Claude Code">
Run the following command:
```bash
export PROWLER_APP_API_KEY="<your-api-key-here>"
claude mcp add --transport http prowler https://mcp.prowler.com/mcp --header "Authorization: Bearer $PROWLER_APP_API_KEY" --scope user
```
</Tab>

<Tab title="Cursor">
1. Open Cursor settings
2. Go to "Tools & MCP"
3. Click the "New MCP Server" button
4. Add the following to the JSON configuration:
```json
{
  "mcpServers": {
    "prowler": {
      "url": "https://mcp.prowler.com/mcp",
      "headers": {
        "Authorization": "Bearer <your-api-key-here>"
      }
    }
  }
}
```
</Tab>

</Tabs>

### STDIO Mode (Local Installation Only)
### STDIO Mode

STDIO mode is only available when running the MCP server locally.

<Tabs>
<Tab title="Using uvx">
<Tab title="Generic uvx installation">
**Run from source or local installation**

```json
@@ -94,7 +138,7 @@ STDIO mode is only available when running the MCP server locally.
      "command": "uvx",
      "args": ["/absolute/path/to/prowler/mcp_server/"],
      "env": {
        "PROWLER_APP_API_KEY": "pk_your_api_key_here",
        "PROWLER_APP_API_KEY": "<your-api-key-here>",
        "PROWLER_API_BASE_URL": "https://api.prowler.com"
      }
    }
@@ -108,7 +152,7 @@ STDIO mode is only available when running the MCP server locally.

</Tab>

<Tab title="Using Docker">
<Tab title="Generic Docker installation">
**Run with Docker image**

```json
@@ -121,7 +165,7 @@ STDIO mode is only available when running the MCP server locally.
        "--rm",
        "-i",
        "--env",
        "PROWLER_APP_API_KEY=pk_your_api_key_here",
        "PROWLER_APP_API_KEY=<your-api-key-here>",
        "--env",
        "PROWLER_API_BASE_URL=https://api.prowler.com",
        "prowlercloud/prowler-mcp"
@@ -154,7 +198,7 @@ Prowler MCP Server supports two authentication methods to connect to Prowler Clo
Use your Prowler API key directly in the Bearer token:

```
Authorization: Bearer pk_your_api_key_here
Authorization: Bearer <your-api-key-here>
```

This is the recommended method for most users.

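As a quick smoke test of the header, a hedged sketch: this assumes the endpoint speaks the MCP Streamable HTTP transport (JSON-RPC over POST) and that your key is exported as `PROWLER_APP_API_KEY`; the exact response shape depends on the server:

```bash
# Hypothetical smoke test: verify the Authorization header is accepted.
export PROWLER_APP_API_KEY="<your-api-key-here>"
curl -sS https://mcp.prowler.com/mcp \
  -X POST \
  -H "Authorization: Bearer $PROWLER_APP_API_KEY" \
  -H "Content-Type: application/json" \
  -H "Accept: application/json, text/event-stream" \
  -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"curl-smoke-test","version":"0.0.0"}}}'
```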
@@ -61,6 +61,59 @@ The Prowler MCP Server enables powerful workflows through AI assistants:
- "What authentication methods does Prowler support for Azure?"
- "How can I contribute a new security check to Prowler?"

### Example: Creating a custom dashboard with Prowler-extracted data

The following example shows how to create a dashboard using the Prowler MCP Server and Claude Desktop.

**Prompt used:**
```
Generate me a security dashboard for the Prowler open source project using live data from Prowler MCP tools.

REQUIREMENTS:
1. Fetch real-time data from Prowler Findings using MCP tools
2. Create a single self-contained HTML file and display it
3. Dashboard must be production-ready with modern design

DATA TO FETCH:
Use these MCP tools in this order:
1. Prowler app list providers - To get all available configured providers in the account
2. Prowler app get latest findings - To get findings information; if there are too many you can use the filter_fields to get less information, or pagination to get it in different batches
3. For the most critical findings you can get more context and remediation with Prowler Hub, to get remediations for example

DESIGN REQUIREMENTS:
- Dark theme (gradient background: #0a0e27 to #131830)
- Card-based layout with glassmorphism effects
- Color scheme:
  * Primary green
  * Secondary purple
- Modern, professional look
- Animated "LIVE DATA" indicator (pulsing green badge)
- Hover effects on all cards (lift, glow, border color change)
- Responsive grid layout
- Mobile-responsive breakpoints at 768px
- Single HTML file with all CSS and JavaScript embedded
- No external dependencies

SPECIFIC DETAILS TO INCLUDE:
- Show actual counts from the data (don't hardcode numbers)
- Add timestamp showing when dashboard was generated
- Link to GitHub repository: https://github.com/prowler-cloud/prowler

OUTPUT:
Generate the complete HTML file and display it
```

**Video:**
<iframe
  className="w-full aspect-video rounded-xl"
  src="https://www.youtube.com/embed/li29KNmYd4g?si=P3m6eB2z0Cqqse_H"
  title="Prowler MCP Server - Creating a dashboard"
  frameBorder="0"
  allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
  allowFullScreen
></iframe>

## Deployment Options

Prowler MCP Server can be used in three ways:

@@ -31,7 +31,7 @@ The supported providers right now are:
| [Kubernetes](/user-guide/providers/kubernetes/in-cluster) | Official | UI, API, CLI |
| [M365](/user-guide/providers/microsoft365/getting-started-m365) | Official | UI, API, CLI |
| [GitHub](/user-guide/providers/github/getting-started-github) | Official | UI, API, CLI |
| [Oracle Cloud](/user-guide/providers/oci/getting-started-oci) | Official | CLI |
| [Oracle Cloud](/user-guide/providers/oci/getting-started-oci) | Official | UI, API, CLI |
| [Infra as Code](/user-guide/providers/iac/getting-started-iac) | Official | CLI |
| [MongoDB Atlas](/user-guide/providers/mongodbatlas/getting-started-mongodbatlas) | Official | CLI |
| [LLM](/user-guide/providers/llm/getting-started-llm) | Official | CLI |

@@ -2,6 +2,10 @@
title: "Prowler ThreatScore Documentation"
---

<Info>This feature is only available in Prowler Cloud/App.</Info>

## Introduction

The **Prowler ThreatScore** is a comprehensive compliance scoring system that provides a unified metric for assessing your organization's security posture across compliance frameworks. It aggregates findings from individual security checks into a single, normalized score ranging from 0 to 100.

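The exact weighting Prowler Cloud applies is not spelled out here, but the aggregation idea can be illustrated with a small sketch: normalize a severity-weighted pass rate into the 0–100 range (the weights and input shape below are hypothetical, not the production algorithm):

```python
# Illustrative only: one way to aggregate per-check results into a 0-100 score.
SEVERITY_WEIGHT = {"critical": 8, "high": 4, "medium": 2, "low": 1, "informational": 0}

def threat_score(findings):
    """findings: iterable of (status, severity) tuples, e.g. ("PASS", "high")."""
    total = scored = 0.0
    for status, severity in findings:
        weight = SEVERITY_WEIGHT.get(severity, 1)
        total += weight
        if status == "PASS":
            scored += weight
    return 100.0 if total == 0 else round(100.0 * scored / total, 2)

print(threat_score([("PASS", "high"), ("FAIL", "critical"), ("PASS", "low")]))  # 38.46
```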
+28 -2
@@ -6,7 +6,32 @@ All notable changes to the **Prowler SDK** are documented in this file.

### Added
- GitHub provider check `organization_default_repository_permission_strict` [(#8785)](https://github.com/prowler-cloud/prowler/pull/8785)
- Add OCI mapping to scan and check classes [(#8927)](https://github.com/prowler-cloud/prowler/pull/8927)
- `codepipeline_project_repo_private` check for AWS provider [(#5915)](https://github.com/prowler-cloud/prowler/pull/5915)
- `cloudstorage_bucket_versioning_enabled` check for GCP provider [(#9014)](https://github.com/prowler-cloud/prowler/pull/9014)
- `cloudstorage_bucket_soft_delete_enabled` check for GCP provider [(#9028)](https://github.com/prowler-cloud/prowler/pull/9028)
- `cloudstorage_bucket_logging_enabled` check for GCP provider [(#9091)](https://github.com/prowler-cloud/prowler/pull/9091)
- C5 compliance framework for Azure provider [(#9081)](https://github.com/prowler-cloud/prowler/pull/9081)
- C5 compliance framework for the GCP provider [(#9097)](https://github.com/prowler-cloud/prowler/pull/9097)

### Changed
- Update AWS Direct Connect service metadata to new format [(#8855)](https://github.com/prowler-cloud/prowler/pull/8855)
- Update AWS DRS service metadata to new format [(#8870)](https://github.com/prowler-cloud/prowler/pull/8870)
- Update AWS DynamoDB service metadata to new format [(#8871)](https://github.com/prowler-cloud/prowler/pull/8871)
- Update AWS CloudWatch service metadata to new format [(#8848)](https://github.com/prowler-cloud/prowler/pull/8848)
- Update AWS EMR service metadata to new format [(#9002)](https://github.com/prowler-cloud/prowler/pull/9002)
- Update AWS EKS service metadata to new format [(#8890)](https://github.com/prowler-cloud/prowler/pull/8890)
- Update AWS Elastic Beanstalk service metadata to new format [(#8934)](https://github.com/prowler-cloud/prowler/pull/8934)
- Update AWS ElastiCache service metadata to new format [(#8933)](https://github.com/prowler-cloud/prowler/pull/8933)
- Update AWS CodeBuild service metadata to new format [(#8851)](https://github.com/prowler-cloud/prowler/pull/8851)
- Update GCP Artifact Registry service metadata to new format [(#9088)](https://github.com/prowler-cloud/prowler/pull/9088)
- Update AWS EFS service metadata to new format [(#8889)](https://github.com/prowler-cloud/prowler/pull/8889)
- Update AWS EventBridge service metadata to new format [(#9003)](https://github.com/prowler-cloud/prowler/pull/9003)
- Update AWS Firehose service metadata to new format [(#9004)](https://github.com/prowler-cloud/prowler/pull/9004)
- Update AWS FMS service metadata to new format [(#9005)](https://github.com/prowler-cloud/prowler/pull/9005)
- Update AWS FSx service metadata to new format [(#9006)](https://github.com/prowler-cloud/prowler/pull/9006)
- Update AWS Glacier service metadata to new format [(#9007)](https://github.com/prowler-cloud/prowler/pull/9007)
- Update AWS CodeArtifact service metadata to new format [(#8850)](https://github.com/prowler-cloud/prowler/pull/8850)

---

@@ -14,7 +39,9 @@ All notable changes to the **Prowler SDK** are documented in this file.

### Fixed
- Add `resource_name` for checks under `logging` for the GCP provider [(#9023)](https://github.com/prowler-cloud/prowler/pull/9023)

- Fix `ec2_instance_with_outdated_ami` check to handle None AMIs [(#9046)](https://github.com/prowler-cloud/prowler/pull/9046)
- Handle timestamp when transforming compliance findings in CCC [(#9042)](https://github.com/prowler-cloud/prowler/pull/9042)
- Update `resource_id` for admincenter service and avoid unnecessary msgraph requests [(#9019)](https://github.com/prowler-cloud/prowler/pull/9019)

---

@@ -60,7 +87,6 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Update AWS Directory Service service metadata to new format [(#8859)](https://github.com/prowler-cloud/prowler/pull/8859)
- Update AWS CloudFront service metadata to new format [(#8829)](https://github.com/prowler-cloud/prowler/pull/8829)
- Deprecate user authentication for M365 provider [(#8865)](https://github.com/prowler-cloud/prowler/pull/8865)
- Update AWS EFS service metadata to new format [(#8889)](https://github.com/prowler-cloud/prowler/pull/8889)

### Fixed
- Fix SNS topics showing empty AWS_ResourceID in Quick Inventory output [(#8762)](https://github.com/prowler-cloud/prowler/issues/8762)

+27 -1
@@ -49,10 +49,12 @@ from prowler.lib.outputs.asff.asff import ASFF
from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected import (
    AWSWellArchitected,
)
from prowler.lib.outputs.compliance.c5.c5_aws import AWSC5
from prowler.lib.outputs.compliance.c5.c5_azure import AzureC5
from prowler.lib.outputs.compliance.c5.c5_gcp import GCPC5
from prowler.lib.outputs.compliance.ccc.ccc_aws import CCC_AWS
from prowler.lib.outputs.compliance.ccc.ccc_azure import CCC_Azure
from prowler.lib.outputs.compliance.ccc.ccc_gcp import CCC_GCP
from prowler.lib.outputs.compliance.c5.c5_aws import AWSC5
from prowler.lib.outputs.compliance.cis.cis_aws import AWSCIS
from prowler.lib.outputs.compliance.cis.cis_azure import AzureCIS
from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
@@ -682,6 +684,18 @@ def prowler():
                )
                generated_outputs["compliance"].append(ccc_azure)
                ccc_azure.batch_write_data_to_file()
            elif compliance_name == "c5_azure":
                filename = (
                    f"{output_options.output_directory}/compliance/"
                    f"{output_options.output_filename}_{compliance_name}.csv"
                )
                c5_azure = AzureC5(
                    findings=finding_outputs,
                    compliance=bulk_compliance_frameworks[compliance_name],
                    file_path=filename,
                )
                generated_outputs["compliance"].append(c5_azure)
                c5_azure.batch_write_data_to_file()
            else:
                filename = (
                    f"{output_options.output_directory}/compliance/"
@@ -773,6 +787,18 @@ def prowler():
                )
                generated_outputs["compliance"].append(ccc_gcp)
                ccc_gcp.batch_write_data_to_file()
            elif compliance_name == "c5_gcp":
                filename = (
                    f"{output_options.output_directory}/compliance/"
                    f"{output_options.output_filename}_{compliance_name}.csv"
                )
                c5_gcp = GCPC5(
                    findings=finding_outputs,
                    compliance=bulk_compliance_frameworks[compliance_name],
                    file_path=filename,
                )
                generated_outputs["compliance"].append(c5_gcp)
                c5_gcp.batch_write_data_to_file()
            else:
                filename = (
                    f"{output_options.output_directory}/compliance/"

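Once wired in, generating these outputs should follow the existing `--compliance` flow; a hedged usage example, assuming the framework keys match the `compliance_name` values in the branches above:

```bash
# Hypothetical invocations; framework keys assumed from the elif branches above.
prowler azure --compliance c5_azure
prowler gcp --compliance c5_gcp
# Per the filename f-strings, CSVs land in
# <output-dir>/compliance/<output-filename>_c5_azure.csv (and _c5_gcp.csv).
```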
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -438,8 +438,16 @@ def execute_checks(
            service = check_name.split("_")[0]
            try:
            try:
                # Map CLI provider names to directory names (for cases where they differ)
                provider_directory_map = {
                    "oci": "oraclecloud",  # OCI SDK conflict avoidance
                }
                provider_directory = provider_directory_map.get(
                    global_provider.type, global_provider.type
                )

                # Import check module
                check_module_path = f"prowler.providers.{global_provider.type}.services.{service}.{check_name}.{check_name}"
                check_module_path = f"prowler.providers.{provider_directory}.services.{service}.{check_name}.{check_name}"
                lib = import_check(check_module_path)
                # Recover functions from check
                check_to_execute = getattr(lib, check_name)

@@ -0,0 +1,92 @@
from prowler.config.config import timestamp
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.c5.models import AzureC5Model
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
from prowler.lib.outputs.finding import Finding


class AzureC5(ComplianceOutput):
    """
    This class represents the Azure C5 compliance output.

    Attributes:
    - _data (list): A list to store transformed data from findings.
    - _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.

    Methods:
    - transform: Transforms findings into Azure C5 compliance format.
    """

    def transform(
        self,
        findings: list[Finding],
        compliance: Compliance,
        compliance_name: str,
    ) -> None:
        """
        Transforms a list of findings into Azure C5 compliance format.

        Parameters:
        - findings (list): A list of findings.
        - compliance (Compliance): A compliance model.
        - compliance_name (str): The name of the compliance model.

        Returns:
        - None
        """
        for finding in findings:
            # Get the compliance requirements for the finding
            finding_requirements = finding.compliance.get(compliance_name, [])
            for requirement in compliance.Requirements:
                if requirement.Id in finding_requirements:
                    for attribute in requirement.Attributes:
                        compliance_row = AzureC5Model(
                            Provider=finding.provider,
                            Description=compliance.Description,
                            SubscriptionId=finding.account_uid,
                            Location=finding.region,
                            AssessmentDate=str(timestamp),
                            Requirements_Id=requirement.Id,
                            Requirements_Description=requirement.Description,
                            Requirements_Attributes_Section=attribute.Section,
                            Requirements_Attributes_SubSection=attribute.SubSection,
                            Requirements_Attributes_Type=attribute.Type,
                            Requirements_Attributes_AboutCriteria=attribute.AboutCriteria,
                            Requirements_Attributes_ComplementaryCriteria=attribute.ComplementaryCriteria,
                            Status=finding.status,
                            StatusExtended=finding.status_extended,
                            ResourceId=finding.resource_uid,
                            ResourceName=finding.resource_name,
                            CheckId=finding.check_id,
                            Muted=finding.muted,
                            Framework=compliance.Framework,
                            Name=compliance.Name,
                        )
                        self._data.append(compliance_row)
        # Add manual requirements to the compliance output
        for requirement in compliance.Requirements:
            if not requirement.Checks:
                for attribute in requirement.Attributes:
                    compliance_row = AzureC5Model(
                        Provider=compliance.Provider.lower(),
                        Description=compliance.Description,
                        SubscriptionId="",
                        Location="",
                        AssessmentDate=str(timestamp),
                        Requirements_Id=requirement.Id,
                        Requirements_Description=requirement.Description,
                        Requirements_Attributes_Section=attribute.Section,
                        Requirements_Attributes_SubSection=attribute.SubSection,
                        Requirements_Attributes_Type=attribute.Type,
                        Requirements_Attributes_AboutCriteria=attribute.AboutCriteria,
                        Requirements_Attributes_ComplementaryCriteria=attribute.ComplementaryCriteria,
                        Status="MANUAL",
                        StatusExtended="Manual check",
                        ResourceId="manual_check",
                        ResourceName="Manual check",
                        CheckId="manual",
                        Muted=False,
                        Framework=compliance.Framework,
                        Name=compliance.Name,
                    )
                    self._data.append(compliance_row)
@@ -0,0 +1,92 @@
from prowler.config.config import timestamp
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.c5.models import GCPC5Model
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
from prowler.lib.outputs.finding import Finding


class GCPC5(ComplianceOutput):
    """
    This class represents the GCP C5 compliance output.

    Attributes:
    - _data (list): A list to store transformed data from findings.
    - _file_descriptor (TextIOWrapper): A file descriptor to write data to a file.

    Methods:
    - transform: Transforms findings into GCP C5 compliance format.
    """

    def transform(
        self,
        findings: list[Finding],
        compliance: Compliance,
        compliance_name: str,
    ) -> None:
        """
        Transforms a list of findings into GCP C5 compliance format.

        Parameters:
        - findings (list): A list of findings.
        - compliance (Compliance): A compliance model.
        - compliance_name (str): The name of the compliance model.

        Returns:
        - None
        """
        for finding in findings:
            # Get the compliance requirements for the finding
            finding_requirements = finding.compliance.get(compliance_name, [])
            for requirement in compliance.Requirements:
                if requirement.Id in finding_requirements:
                    for attribute in requirement.Attributes:
                        compliance_row = GCPC5Model(
                            Provider=finding.provider,
                            Description=compliance.Description,
                            ProjectId=finding.account_uid,
                            Location=finding.region,
                            AssessmentDate=str(timestamp),
                            Requirements_Id=requirement.Id,
                            Requirements_Description=requirement.Description,
                            Requirements_Attributes_Section=attribute.Section,
                            Requirements_Attributes_SubSection=attribute.SubSection,
                            Requirements_Attributes_Type=attribute.Type,
                            Requirements_Attributes_AboutCriteria=attribute.AboutCriteria,
                            Requirements_Attributes_ComplementaryCriteria=attribute.ComplementaryCriteria,
                            Status=finding.status,
                            StatusExtended=finding.status_extended,
                            ResourceId=finding.resource_uid,
                            ResourceName=finding.resource_name,
                            CheckId=finding.check_id,
                            Muted=finding.muted,
                            Framework=compliance.Framework,
                            Name=compliance.Name,
                        )
                        self._data.append(compliance_row)
        # Add manual requirements to the compliance output
        for requirement in compliance.Requirements:
            if not requirement.Checks:
                for attribute in requirement.Attributes:
                    compliance_row = GCPC5Model(
                        Provider=compliance.Provider.lower(),
                        Description=compliance.Description,
                        ProjectId="",
                        Location="",
                        AssessmentDate=str(timestamp),
                        Requirements_Id=requirement.Id,
                        Requirements_Description=requirement.Description,
                        Requirements_Attributes_Section=attribute.Section,
                        Requirements_Attributes_SubSection=attribute.SubSection,
                        Requirements_Attributes_Type=attribute.Type,
                        Requirements_Attributes_AboutCriteria=attribute.AboutCriteria,
                        Requirements_Attributes_ComplementaryCriteria=attribute.ComplementaryCriteria,
                        Status="MANUAL",
                        StatusExtended="Manual check",
                        ResourceId="manual_check",
                        ResourceName="Manual check",
                        CheckId="manual",
                        Muted=False,
                        Framework=compliance.Framework,
                        Name=compliance.Name,
                    )
                    self._data.append(compliance_row)
@@ -28,3 +28,57 @@ class AWSC5Model(BaseModel):
    Muted: bool
    Framework: str
    Name: str


class AzureC5Model(BaseModel):
    """
    AzureC5Model generates a finding's output in Azure C5 Compliance format.
    """

    Provider: str
    Description: str
    SubscriptionId: str
    Location: str
    AssessmentDate: str
    Requirements_Id: str
    Requirements_Description: str
    Requirements_Attributes_Section: str
    Requirements_Attributes_SubSection: str = None
    Requirements_Attributes_Type: str = None
    Requirements_Attributes_AboutCriteria: Optional[str] = None
    Requirements_Attributes_ComplementaryCriteria: Optional[str] = None
    Status: str
    StatusExtended: str
    ResourceId: str
    ResourceName: str
    CheckId: str
    Muted: bool
    Framework: str
    Name: str


class GCPC5Model(BaseModel):
    """
    GCPC5Model generates a finding's output in GCP C5 Compliance format.
    """

    Provider: str
    Description: str
    ProjectId: str
    Location: str
    AssessmentDate: str
    Requirements_Id: str
    Requirements_Description: str
    Requirements_Attributes_Section: str
    Requirements_Attributes_SubSection: str = None
    Requirements_Attributes_Type: str = None
    Requirements_Attributes_AboutCriteria: Optional[str] = None
    Requirements_Attributes_ComplementaryCriteria: Optional[str] = None
    Status: str
    StatusExtended: str
    ResourceId: str
    ResourceName: str
    CheckId: str
    Muted: bool
    Framework: str
    Name: str

@@ -1,3 +1,4 @@
from prowler.config.config import timestamp
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.ccc.models import CCC_AWSModel
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
@@ -44,7 +45,7 @@ class CCC_AWS(ComplianceOutput):
                            Description=compliance.Description,
                            AccountId=finding.account_uid,
                            Region=finding.region,
                            AssessmentDate=str(finding.timestamp),
                            AssessmentDate=str(timestamp),
                            Requirements_Id=requirement.Id,
                            Requirements_Description=requirement.Description,
                            Requirements_Attributes_FamilyName=attribute.FamilyName,
@@ -73,7 +74,7 @@ class CCC_AWS(ComplianceOutput):
                        Description=compliance.Description,
                        AccountId="",
                        Region="",
                        AssessmentDate=str(finding.timestamp),
                        AssessmentDate=str(timestamp),
                        Requirements_Id=requirement.Id,
                        Requirements_Description=requirement.Description,
                        Requirements_Attributes_FamilyName=attribute.FamilyName,

@@ -1,3 +1,4 @@
from prowler.config.config import timestamp
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.ccc.models import CCC_AzureModel
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
@@ -44,7 +45,7 @@ class CCC_Azure(ComplianceOutput):
                            Description=compliance.Description,
                            SubscriptionId=finding.account_uid,
                            Location=finding.region,
                            AssessmentDate=str(finding.timestamp),
                            AssessmentDate=str(timestamp),
                            Requirements_Id=requirement.Id,
                            Requirements_Description=requirement.Description,
                            Requirements_Attributes_FamilyName=attribute.FamilyName,
@@ -73,7 +74,7 @@ class CCC_Azure(ComplianceOutput):
                        Description=compliance.Description,
                        SubscriptionId="",
                        Location="",
                        AssessmentDate=str(finding.timestamp),
                        AssessmentDate=str(timestamp),
                        Requirements_Id=requirement.Id,
                        Requirements_Description=requirement.Description,
                        Requirements_Attributes_FamilyName=attribute.FamilyName,

@@ -1,3 +1,4 @@
from prowler.config.config import timestamp
from prowler.lib.check.compliance_models import Compliance
from prowler.lib.outputs.compliance.ccc.models import CCC_GCPModel
from prowler.lib.outputs.compliance.compliance_output import ComplianceOutput
@@ -44,7 +45,7 @@ class CCC_GCP(ComplianceOutput):
                            Description=compliance.Description,
                            ProjectId=finding.account_uid,
                            Location=finding.region,
                            AssessmentDate=str(finding.timestamp),
                            AssessmentDate=str(timestamp),
                            Requirements_Id=requirement.Id,
                            Requirements_Description=requirement.Description,
                            Requirements_Attributes_FamilyName=attribute.FamilyName,
@@ -73,7 +74,7 @@ class CCC_GCP(ComplianceOutput):
                        Description=compliance.Description,
                        ProjectId="",
                        Location="",
                        AssessmentDate=str(finding.timestamp),
                        AssessmentDate=str(timestamp),
                        Requirements_Id=requirement.Id,
                        Requirements_Description=requirement.Description,
                        Requirements_Attributes_FamilyName=attribute.FamilyName,

@@ -407,6 +407,8 @@ class Finding(BaseModel):
            finding.subscription = list(provider.identity.subscriptions.keys())[0]
        elif provider.type == "gcp":
            finding.project_id = list(provider.projects.keys())[0]
        elif provider.type == "oci":
            finding.compartment_id = getattr(finding, "compartment_id", "")

        finding.check_metadata = CheckMetadata(
            Provider=finding.check_metadata["provider"],

@@ -271,8 +271,16 @@ class Scan:
            # Recover service from check name
            service = get_service_name_from_check_name(check_name)
            try:
                # Map CLI provider names to directory names (for cases where they differ)
                provider_directory_map = {
                    "oci": "oraclecloud",  # OCI SDK conflict avoidance
                }
                provider_directory = provider_directory_map.get(
                    self._provider.type, self._provider.type
                )

                # Import check module
                check_module_path = f"prowler.providers.{self._provider.type}.services.{service}.{check_name}.{check_name}"
                check_module_path = f"prowler.providers.{provider_directory}.services.{service}.{check_name}.{check_name}"
                lib = import_check(check_module_path)
                # Recover functions from check
                check_to_execute = getattr(lib, check_name)

+23 -12
@@ -1,31 +1,42 @@
{
  "Provider": "aws",
  "CheckID": "cloudwatch_alarm_actions_alarm_state_configured",
  "CheckTitle": "Check if CloudWatch alarms have specified actions configured for the ALARM state.",
  "CheckTitle": "CloudWatch metric alarm has actions configured for the ALARM state",
  "CheckType": [
    "Software and Configuration Checks/AWS Security Best Practices"
  ],
  "ServiceName": "cloudwatch",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:aws:cloudwatch:region:account-id:alarm/alarm-name",
  "ResourceIdTemplate": "",
  "Severity": "high",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "This control checks whether an Amazon CloudWatch alarm has at least one action configured for the ALARM state. The control fails if the alarm doesn't have an action configured for the ALARM state.",
  "Risk": "Without an action configured for the ALARM state, the CloudWatch alarm will not notify you or take any predefined action when a monitored metric goes beyond the defined threshold, potentially delaying responses to critical events.",
  "RelatedUrl": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-actions",
  "Description": "Amazon CloudWatch metric alarms are evaluated for **actions** configured for the `ALARM` state. The finding flags alarms that have no action to execute when their monitored metric crosses its threshold.",
  "Risk": "Without an **ALARM action**, threshold breaches trigger no **notification** or **automated response**. This delays detection and containment, risking:\n- Availability: prolonged outages or missed scale-out\n- Integrity/confidentiality: unchecked anomalies enabling tampering or data loss",
  "RelatedUrl": "",
  "AdditionalURLs": [
    "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-actions",
    "https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudwatch/client/put_metric_alarm.html",
    "https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_metric_alarm",
    "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudwatch-controls.html#cloudwatch-15",
    "https://support.icompaas.com/support/solutions/articles/62000233431-ensure-cloudwatch-alarms-have-specified-actions-configured-for-the-alarm-state",
    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudWatch/cloudwatch-alarm-action.html",
    "https://awscli.amazonaws.com/v2/documentation/api/2.0.34/reference/cloudwatch/put-metric-alarm.html"
  ],
  "Remediation": {
    "Code": {
      "CLI": "aws cloudwatch put-metric-alarm --alarm-name <alarm-name> --alarm-actions <action-arn>",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudwatch-controls.html#cloudwatch-15",
      "Terraform": ""
      "CLI": "aws cloudwatch put-metric-alarm --alarm-name <alarm-name> --metric-name <metric-name> --namespace <namespace> --statistic <statistic> --period <period-seconds> --evaluation-periods <evaluation-periods> --threshold <threshold> --comparison-operator <comparison-operator> --alarm-actions <action-arn>",
      "NativeIaC": "```yaml\n# CloudFormation: add an ALARM action to a metric alarm\nResources:\n <example_resource_name>:\n Type: AWS::CloudWatch::Alarm\n Properties:\n AlarmName: <example_resource_name>\n MetricName: <metric-name>\n Namespace: <namespace>\n Statistic: Average\n Period: 60\n EvaluationPeriods: 1\n Threshold: 1\n ComparisonOperator: GreaterThanThreshold\n AlarmActions:\n - <action-arn> # CRITICAL: adds an action for ALARM state so the check passes\n```",
      "Other": "1. Open the AWS Console and go to CloudWatch > Alarms\n2. Select the target alarm and choose Edit (or Modify alarm)\n3. In Actions, under When alarm state is ALARM, add an action (e.g., select an SNS topic or other supported action)\n4. Click Save changes",
      "Terraform": "```hcl\n# Terraform: add an ALARM action to a metric alarm\nresource \"aws_cloudwatch_metric_alarm\" \"<example_resource_name>\" {\n alarm_name = \"<example_resource_name>\"\n metric_name = \"<metric-name>\"\n namespace = \"<namespace>\"\n statistic = \"Average\"\n period = 60\n evaluation_periods = 1\n threshold = 1\n comparison_operator = \"GreaterThanThreshold\"\n alarm_actions = [\"<action-arn>\"] # CRITICAL: ensures an action is configured for ALARM state\n}\n```"
    },
    "Recommendation": {
      "Text": "Configure your CloudWatch alarms to trigger actions, such as sending notifications via Amazon SNS, when the alarm state changes to ALARM.",
      "Url": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudWatch/cloudwatch-alarm-action.html"
      "Text": "Assign at least one **ALARM-state action** per alarm (e.g., notify via SNS or run automated remediation with Lambda/SSM). Keep actions enabled, apply **least privilege** to targets, and regularly test. *For critical metrics*, add redundant paths (EventBridge) for **defense in depth**.",
      "Url": "https://hub.prowler.com/check/cloudwatch_alarm_actions_alarm_state_configured"
    }
  },
  "Categories": [],
  "Categories": [
    "resilience"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""

+21 -12
@@ -1,31 +1,40 @@
{
  "Provider": "aws",
  "CheckID": "cloudwatch_alarm_actions_enabled",
  "CheckTitle": "Check if CloudWatch alarms have actions enabled",
  "CheckTitle": "CloudWatch metric alarm has actions enabled",
  "CheckType": [
    "Software and Configuration Checks/AWS Security Best Practices"
    "Software and Configuration Checks/AWS Security Best Practices",
    "Industry and Regulatory Standards/AWS Foundational Security Best Practices",
    "TTPs/Defense Evasion"
  ],
  "ServiceName": "cloudwatch",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:aws:cloudwatch:region:account-id:alarm/alarm-name",
  "ResourceIdTemplate": "",
  "Severity": "high",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Alarm actions automatically alert you when a monitored metric is outside the defined threshold. If the alarm action is deactivated, no actions are run when the alarm changes state, and you won't be alerted to changes in monitored metrics. We recommend activating CloudWatch alarm actions to help you quickly respond to security and operational issues.",
  "Risk": "Without active alarm actions, you may not be alerted to security or operational issues, potentially leading to delayed responses and increased risk.",
  "RelatedUrl": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-actions",
  "Description": "**CloudWatch metric alarms** are evaluated for **alarm actions** activation (`actions_enabled: true`), enabling state changes to invoke configured notifications or automated responses.",
  "Risk": "With alarm actions disabled, state changes neither notify nor remediate. Incidents can persist unnoticed, enabling unauthorized activity, configuration drift, or capacity exhaustion. Visibility drops, MTTR rises, and confidentiality, integrity, and availability are all at greater risk.",
  "RelatedUrl": "",
  "AdditionalURLs": [
    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudWatch/cloudwatch-alarm-action-activated.html",
    "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-actions",
    "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudwatch-controls.html#cloudwatch-17"
  ],
  "Remediation": {
    "Code": {
      "CLI": "aws cloudwatch enable-alarm-actions --alarm-names <alarm-name>",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudwatch-controls.html#cloudwatch-17",
      "Terraform": ""
      "NativeIaC": "```yaml\nResources:\n <example_resource_name>:\n Type: AWS::CloudWatch::Alarm\n Properties:\n ActionsEnabled: true # FIX: activates alarm actions so the check passes\n ComparisonOperator: GreaterThanThreshold\n EvaluationPeriods: 1\n MetricName: <example_metric_name>\n Namespace: <example_metric_namespace>\n Period: 60\n Statistic: Average\n Threshold: 1\n```",
      "Other": "1. Open the CloudWatch console\n2. Go to Alarms > All alarms and select the alarm\n3. Choose Actions > Alarm actions - new > Enable\n4. Confirm to activate actions",
      "Terraform": "```hcl\nresource \"aws_cloudwatch_metric_alarm\" \"<example_resource_name>\" {\n alarm_name = \"<example_resource_name>\"\n comparison_operator = \"GreaterThanThreshold\"\n evaluation_periods = 1\n metric_name = \"<example_metric_name>\"\n namespace = \"<example_metric_namespace>\"\n period = 60\n statistic = \"Average\"\n threshold = 1\n\n actions_enabled = true # FIX: activates alarm actions so the check passes\n}\n```"
    },
    "Recommendation": {
      "Text": "Ensure that all CloudWatch alarms have at least one action configured. This can include sending notifications to SNS topics, invoking Lambda functions, or triggering other AWS services.",
      "Url": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudWatch/cloudwatch-alarm-action-activated.html"
      "Text": "Enable `actions_enabled` on critical alarms and attach least-privilege actions (SNS, automation) for ALARM and recovery states. Use redundant targets, regularly test notifications, and integrate with incident response. Apply **defense in depth** with complementary detections to ensure timely, reliable alerting.",
      "Url": "https://hub.prowler.com/check/cloudwatch_alarm_actions_enabled"
    }
  },
  "Categories": [],
  "Categories": [
    "resilience"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""

+23 -12
@@ -1,31 +1,42 @@
{
  "Provider": "aws",
  "CheckID": "cloudwatch_changes_to_network_acls_alarm_configured",
  "CheckTitle": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL).",
  "CheckTitle": "CloudWatch log metric filter and alarm exist for Network ACL (NACL) change events",
  "CheckType": [
    "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
    "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark",
    "Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis"
  ],
  "ServiceName": "cloudwatch",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "ResourceIdTemplate": "",
  "Severity": "medium",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL).",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
  "Description": "CloudTrail records for **Network ACL changes** are matched by a CloudWatch Logs metric filter with an associated alarm for events like `CreateNetworkAcl`, `CreateNetworkAclEntry`, `DeleteNetworkAcl`, `DeleteNetworkAclEntry`, `ReplaceNetworkAclEntry`, and `ReplaceNetworkAclAssociation`.",
  "Risk": "Absent monitoring of **NACL changes** reduces detection of policy tampering, risking loss of **confidentiality** (opened ingress/egress), degraded network **integrity** (lateral movement, bypassed segmentation), and reduced **availability** (traffic blackholes or lockouts).",
  "RelatedUrl": "",
  "AdditionalURLs": [
    "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
    "https://www.clouddefense.ai/compliance-rules/cis-v130/monitoring/cis-v130-4-11",
    "https://support.icompaas.com/support/solutions/articles/62000084031-ensure-a-log-metric-filter-and-alarm-exist-for-changes-to-network-access-control-lists-nacl-",
    "https://trendmicro.com/cloudoneconformity/knowledge-base/aws/CloudWatchLogs/network-acl-changes-alarm.html",
    "https://support.icompaas.com/support/solutions/articles/62000233134-4-11-ensure-network-access-control-list-nacl-changes-are-monitored-manual-"
  ],
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "https://docs.prowler.com/checks/aws/monitoring-policies/monitoring_11",
      "Terraform": "https://docs.prowler.com/checks/aws/monitoring-policies/monitoring_11#fix---buildtime"
      "NativeIaC": "```yaml\n# CloudFormation to alert on NACL changes\nResources:\n MetricFilter:\n Type: AWS::Logs::MetricFilter\n Properties:\n LogGroupName: \"<example_resource_name>\" # CRITICAL: CloudTrail log group to monitor\n FilterPattern: '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }' # CRITICAL: detects NACL changes\n MetricTransformations:\n - MetricValue: \"1\"\n MetricNamespace: \"CISBenchmark\"\n MetricName: \"nacl_changes\"\n\n NaclChangesAlarm:\n Type: AWS::CloudWatch::Alarm\n Properties:\n AlarmName: \"nacl_changes\"\n ComparisonOperator: GreaterThanOrEqualToThreshold\n EvaluationPeriods: 1\n MetricName: \"nacl_changes\" # CRITICAL: alarm targets the metric from the filter\n Namespace: \"CISBenchmark\"\n Period: 300\n Statistic: Sum\n Threshold: 1\n```",
      "Other": "1. In the AWS Console, go to CloudWatch > Log groups and open the CloudTrail log group\n2. Metric filters tab > Create metric filter\n3. Set Filter pattern to:\n { ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\n4. Next > Filter name: nacl_changes; Metric namespace: CISBenchmark; Metric name: nacl_changes; Metric value: 1 > Create metric filter\n5. Select the new metric filter > Create alarm\n6. Set Statistic: Sum, Period: 5 minutes, Threshold type: Static, Condition: Greater/Equal, Threshold: 1\n7. Next through actions (optional) > Name: nacl_changes > Create alarm",
      "Terraform": "```hcl\n# CloudWatch metric filter and alarm for NACL changes\nresource \"aws_cloudwatch_log_metric_filter\" \"nacl\" {\n name = \"nacl_changes\"\n log_group_name = \"<example_resource_name>\" # CloudTrail log group\n pattern = \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\" # CRITICAL: detects NACL changes\n\n metric_transformation {\n name = \"nacl_changes\"\n namespace = \"CISBenchmark\"\n value = \"1\"\n }\n}\n\nresource \"aws_cloudwatch_metric_alarm\" \"nacl\" {\n alarm_name = \"nacl_changes\"\n comparison_operator = \"GreaterThanOrEqualToThreshold\"\n evaluation_periods = 1\n metric_name = \"nacl_changes\" # CRITICAL: alarm targets the metric from the filter\n namespace = \"CISBenchmark\"\n period = 300\n statistic = \"Sum\"\n threshold = 1\n}\n```"
    },
    "Recommendation": {
      "Text": "It is recommended that a metric filter and alarm be established for unauthorized requests.",
      "Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html"
      "Text": "Implement a CloudWatch Logs metric filter and alarm for NACL change events from CloudTrail and route alerts to responders. Enforce **least privilege** on NACL management, require **change control**, and use **defense in depth** with configuration monitoring and flow logs to validate and monitor network posture.",
      "Url": "https://hub.prowler.com/check/cloudwatch_changes_to_network_acls_alarm_configured"
    }
  },
  "Categories": [],
  "Categories": [
    "logging",
    "threat-detection"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": "Logging and Monitoring"

+24 -12
@@ -1,31 +1,43 @@
{
  "Provider": "aws",
  "CheckID": "cloudwatch_changes_to_network_gateways_alarm_configured",
  "CheckTitle": "Ensure a log metric filter and alarm exist for changes to network gateways.",
  "CheckTitle": "CloudWatch Logs metric filter and alarm exist for changes to network gateways",
  "CheckType": [
    "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
    "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark",
    "Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis",
    "Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
    "TTPs/Command and Control"
  ],
  "ServiceName": "cloudwatch",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
  "ResourceIdTemplate": "",
  "Severity": "medium",
  "ResourceType": "AwsCloudWatchAlarm",
  "Description": "Ensure a log metric filter and alarm exist for changes to network gateways.",
  "Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
  "RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
  "Description": "CloudWatch log metric filters and alarms for **network gateway changes** are identified by matching CloudTrail events such as `CreateCustomerGateway`, `DeleteCustomerGateway`, `AttachInternetGateway`, `CreateInternetGateway`, `DeleteInternetGateway`, and `DetachInternetGateway` in log groups that receive trail logs.",
  "Risk": "Without this monitoring, gateway changes can expose private networks to the Internet or break connectivity. Adversaries or mistakes can enable data exfiltration, bypass network inspection, and trigger outages via deletions or detachments, impacting **confidentiality** and **availability**.",
  "RelatedUrl": "",
  "AdditionalURLs": [
    "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
    "https://support.icompaas.com/support/solutions/articles/62000083807-ensure-a-log-metric-filter-and-alarm-exist-for-changes-to-network-gateways",
    "https://docs.aws.amazon.com/securityhub/latest/userguide/cloudwatch-controls.html#cloudwatch-12",
    "https://paper.bobylive.com/Security/CIS/CIS_Amazon_Web_Services_Foundations_Benchmark_v1_3_0.pdf"
  ],
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "https://docs.prowler.com/checks/aws/monitoring-policies/monitoring_12",
      "Terraform": "https://docs.prowler.com/checks/aws/monitoring-policies/monitoring_12#fix---buildtime"
      "NativeIaC": "```yaml\n# CloudFormation: Create metric filter and alarm for network gateway changes\nResources:\n NetworkGatewayMetricFilter:\n Type: AWS::Logs::MetricFilter\n Properties:\n LogGroupName: <example_resource_name>\n FilterPattern: '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }' # Critical: matches gateway change events\n MetricTransformations:\n - MetricName: <example_resource_name>\n MetricNamespace: <example_resource_name>\n MetricValue: \"1\"\n\n NetworkGatewayAlarm:\n Type: AWS::CloudWatch::Alarm\n Properties:\n ComparisonOperator: GreaterThanOrEqualToThreshold\n EvaluationPeriods: 1\n MetricName: <example_resource_name> # Critical: alarm targets the metric created by the filter\n Namespace: <example_resource_name>\n Period: 300\n Statistic: Sum\n Threshold: 1\n```",
      "Other": "1. In the AWS Console, go to CloudWatch > Logs > Log groups and open the CloudTrail log group\n2. Create metric filter:\n - Filter pattern: { ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\n - Metric name: <example_resource_name>\n - Metric namespace: <example_resource_name>\n - Metric value: 1\n3. From the filter, choose Create alarm:\n - Statistic: Sum, Period: 5 minutes, Threshold: >= 1, Evaluation periods: 1\n - Create the alarm (actions optional)\n",
      "Terraform": "```hcl\n# CloudWatch Logs metric filter for network gateway changes\nresource \"aws_cloudwatch_log_metric_filter\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n log_group_name = \"<example_resource_name>\"\n pattern = \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\" # Critical: matches gateway change events\n\n metric_transformation {\n name = \"<example_resource_name>\"\n namespace = \"<example_resource_name>\"\n value = \"1\"\n }\n}\n\n# Alarm on the metric filter\nresource \"aws_cloudwatch_metric_alarm\" \"<example_resource_name>\" {\n alarm_name = \"<example_resource_name>\"\n comparison_operator = \"GreaterThanOrEqualToThreshold\"\n evaluation_periods = 1\n metric_name = \"<example_resource_name>\" # Critical: must match metric from the filter\n namespace = \"<example_resource_name>\"\n period = 300\n statistic = \"Sum\"\n threshold = 1\n}\n```"
    },
    "Recommendation": {
      "Text": "It is recommended that a metric filter and alarm be established for unauthorized requests.",
      "Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html"
      "Text": "Send CloudTrail to CloudWatch Logs and create a metric filter for the listed gateway events with an alarm that notifies responders. Enforce **least privilege** for gateway modifications, require change approvals, and route alerts to monitored channels as part of **defense in depth**.",
      "Url": "https://hub.prowler.com/check/cloudwatch_changes_to_network_gateways_alarm_configured"
    }
  },
  "Categories": [],
  "Categories": [
    "logging",
    "threat-detection"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": "Logging and Monitoring"

+21
-12
@@ -1,31 +1,40 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "cloudwatch_changes_to_network_route_tables_alarm_configured",
|
||||
"CheckTitle": "Ensure route table changes are monitored",
|
||||
"CheckTitle": "Account monitors VPC route table changes with a CloudWatch Logs metric filter and alarm",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark",
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"TTPs/Defense Evasion",
|
||||
"Effects/Data Exfiltration"
|
||||
],
|
||||
"ServiceName": "cloudwatch",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
|
||||
"ResourceIdTemplate": "",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsCloudWatchAlarm",
|
||||
"Description": "Real-time monitoring of API calls can be achieved by directing Cloud Trail Logs to CloudWatch Logs, or an external Security information and event management (SIEM)environment, and establishing corresponding metric filters and alarms. Routing tablesare used to route network traffic between subnets and to network gateways. It isrecommended that a metric filter and alarm be established for changes to route tables.",
|
||||
"Risk": "CloudWatch is an AWS native service that allows you to ob serve and monitor resources and applications. CloudTrail Logs can also be sent to an external Security informationand event management (SIEM) environment for monitoring and alerting.Monitoring changes to route tables will help ensure that all VPC traffic flows through anexpected path and prevent any accidental or intentional modifications that may lead touncontrolled network traffic. An alarm should be triggered every time an AWS API call isperformed to create, replace, delete, or disassociate a Route Table.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
|
||||
"Description": "**VPC route table changes** are captured from **CloudTrail logs** by a **CloudWatch Logs metric filter** with an associated **alarm** for events like `CreateRoute`, `CreateRouteTable`, `ReplaceRoute`, `ReplaceRouteTableAssociation`, `DeleteRoute`, `DeleteRouteTable`, and `DisassociateRouteTable`.",
|
||||
"Risk": "Without monitoring of **route table changes**, unauthorized or accidental edits can redirect traffic, bypass inspection, or blackhole routes, impacting **confidentiality** (exfiltration), **integrity** (tampered paths), and **availability** (outages from misrouted traffic).",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html"
|
||||
],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.prowler.com/checks/aws/monitoring-policies/monitoring_13",
|
||||
"Terraform": "https://docs.prowler.com/checks/aws/monitoring-policies/monitoring_13#fix---buildtime"
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Metric filter + alarm for VPC route table changes\nResources:\n RouteTableChangeMetricFilter:\n Type: AWS::Logs::MetricFilter\n Properties:\n LogGroupName: \"<example_resource_name>\"\n # CRITICAL: Detect EC2 route table change events in CloudTrail logs\n # Includes eventSource and the required eventNames\n FilterPattern: '{($.eventSource = ec2.amazonaws.com) && (($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable))}'\n MetricTransformations:\n - MetricValue: \"1\"\n MetricNamespace: \"<example_resource_name>\"\n MetricName: \"<example_resource_name>\"\n\n RouteTableChangeAlarm:\n Type: AWS::CloudWatch::Alarm\n Properties:\n # CRITICAL: Alarm on the metric from the filter above\n Namespace: \"<example_resource_name>\"\n MetricName: \"<example_resource_name>\"\n ComparisonOperator: GreaterThanOrEqualToThreshold\n EvaluationPeriods: 1\n Period: 300\n Statistic: Sum\n Threshold: 1\n```",
|
||||
"Other": "1. In the AWS console, open CloudWatch > Log groups and select your CloudTrail log group\n2. Go to Metric filters > Create metric filter\n3. Set Filter pattern to:\n {($.eventSource = ec2.amazonaws.com) && (($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable))}\n4. Name the metric and set Metric value to 1; choose any namespace/name\n5. Create the filter\n6. From the filter, click Create alarm\n7. Set Statistic: Sum, Period: 5 minutes, Threshold type: Static, Threshold: 1, Whenever: Greater/Equal\n8. Create the alarm (notifications optional)",
|
||||
"Terraform": "```hcl\n# Metric filter + alarm for VPC route table changes\nresource \"aws_cloudwatch_log_metric_filter\" \"routes\" {\n name = \"<example_resource_name>\"\n log_group_name = \"<example_resource_name>\"\n # CRITICAL: Detect EC2 route table change events in CloudTrail logs\n pattern = \"{($.eventSource = ec2.amazonaws.com) && (($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable))}\"\n\n metric_transformation {\n name = \"<example_resource_name>\"\n namespace = \"<example_resource_name>\"\n value = \"1\"\n }\n}\n\nresource \"aws_cloudwatch_metric_alarm\" \"routes\" {\n alarm_name = \"<example_resource_name>\"\n # CRITICAL: Alarm targets the metric from the filter above\n metric_name = \"<example_resource_name>\"\n namespace = \"<example_resource_name>\"\n comparison_operator = \"GreaterThanOrEqualToThreshold\"\n evaluation_periods = 1\n period = 300\n statistic = \"Sum\"\n threshold = 1\n}\n```"
},
"Recommendation": {
"Text": "If you are using CloudTrails and CloudWatch, perform the following to setup the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on filter pattern provided which checks for route table changes and the <cloudtrail_log_group_name> taken from audit step 1. aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> -- filter-name `<route_table_changes_metric>` --metric-transformations metricName= `<route_table_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = ec2.amazonaws.com) && (($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable)) }' Note: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify aws sns create-topic --name <sns_topic_name> Note: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> - -notification-endpoint <sns_subscription_endpoints> Note: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 aws cloudwatch put-metric-alarm --alarm-name `<route_table_changes_alarm>` --metric-name `<route_table_changes_metric>` --statistic Sum --period 300 - -threshold 1 --comparison-operator GreaterThanOrEqualToThreshold -- evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn>",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html"
"Text": "Implement a **CloudWatch Logs metric filter and alarm** on CloudTrail for these route table events and notify responders. Enforce **least privilege** for route modifications, require **change control**, and apply **defense in depth** with VPC Flow Logs and guardrails to prevent and quickly contain unsafe routing changes.",
"Url": "https://hub.prowler.com/check/cloudwatch_changes_to_network_route_tables_alarm_configured"
}
},
"Categories": [],
"Categories": [
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
@@ -1,31 +1,37 @@
{
"Provider": "aws",
"CheckID": "cloudwatch_changes_to_vpcs_alarm_configured",
"CheckTitle": "Ensure a log metric filter and alarm exist for VPC changes.",
"CheckTitle": "AWS account has a CloudWatch Logs metric filter and alarm for VPC changes",
"CheckType": [
"Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"
],
"ServiceName": "cloudwatch",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "AwsCloudWatchAlarm",
"Description": "Ensure a log metric filter and alarm exist for VPC changes.",
"Risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.",
"RelatedUrl": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html",
"Description": "**CloudTrail events** for **VPC configuration changes** are captured in CloudWatch Logs with a metric filter and an associated alarm. The filter targets actions like `CreateVpc`, `DeleteVpc`, `ModifyVpcAttribute`, and VPC peering operations to surface when network topology is altered.",
"Risk": "Without alerting on VPC changes, unauthorized or accidental edits to routes, peering, or attributes can go unnoticed, exposing private networks and enabling data exfiltration (C), lateral movement and traffic tampering (I), and outages from misrouted or bridged networks (A).",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html"
],
"Remediation": {
"Code": {
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.prowler.com/checks/aws/monitoring-policies/monitoring_14",
|
||||
"Terraform": "https://docs.prowler.com/checks/aws/monitoring-policies/monitoring_14#fix---buildtime"
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Create a metric filter and alarm for VPC changes\nResources:\n VPCChangesMetricFilter:\n Type: AWS::Logs::MetricFilter\n Properties:\n LogGroupName: <example_log_group_name>\n FilterPattern: '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }' # Critical: matches VPC change events\n MetricTransformations:\n - MetricName: vpc_changes_metric\n MetricNamespace: CISBenchmark\n MetricValue: \"1\" # Critical: emits a metric on matching events\n\n VPCChangesAlarm:\n Type: AWS::CloudWatch::Alarm\n Properties:\n MetricName: vpc_changes_metric # Critical: alarm monitors the metric above\n Namespace: CISBenchmark\n Statistic: Sum\n Period: 300\n EvaluationPeriods: 1\n Threshold: 1\n ComparisonOperator: GreaterThanOrEqualToThreshold\n```",
"Other": "1. In the AWS Console, go to CloudWatch > Log groups and open the CloudTrail log group\n2. Choose Create metric filter\n3. For Filter pattern, paste:\n { ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\n4. Name the filter and set Metric namespace to CISBenchmark, Metric name to vpc_changes_metric, Metric value to 1; create the filter\n5. Select the new filter and choose Create alarm\n6. Set Statistic to Sum, Period 5 minutes, Threshold type Static, Whenever Greater/Equal 1, Evaluation periods 1\n7. Create the alarm (actions/notifications are optional and not required for pass)\n",
"Terraform": "```hcl\n# Metric filter for VPC changes\nresource \"aws_cloudwatch_log_metric_filter\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n log_group_name = \"<example_log_group_name>\"\n pattern = \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\" # Critical: matches VPC change events\n\n metric_transformation {\n name = \"<example_resource_name>\" # Critical: metric created by the filter\n namespace = \"CISBenchmark\"\n value = \"1\"\n }\n}\n\n# Alarm on the VPC changes metric\nresource \"aws_cloudwatch_metric_alarm\" \"<example_resource_name>\" {\n metric_name = \"<example_resource_name>\" # Critical: alarm monitors the filter's metric\n namespace = \"CISBenchmark\"\n statistic = \"Sum\"\n period = 300\n evaluation_periods = 1\n threshold = 1\n comparison_operator = \"GreaterThanOrEqualToThreshold\"\n}\n```"
},
"Recommendation": {
"Text": "It is recommended that a metric filter and alarm be established for unauthorized requests.",
"Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html"
"Text": "Create a CloudWatch Logs metric filter and alarm on CloudTrail for critical **VPC change events**, and notify responders. Apply **least privilege** to network changes, require change approvals, and use **defense in depth** (segmentation, route controls) to prevent and contain unauthorized modifications.",
"Url": "https://hub.prowler.com/check/cloudwatch_changes_to_vpcs_alarm_configured"
}
},
"Categories": [],
"Categories": [
"logging",
"threat-detection"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "Logging and Monitoring"