Compare commits

22 Commits

| SHA1 |
|---|
| bede10508f |
| b4fe70efa5 |
| 557284efb1 |
| 0204d1e02d |
| 9bd4e4b65c |
| 8ef326b276 |
| 68ffb2b219 |
| 739be07077 |
| 0abbb7fc59 |
| 0b4393776c |
| 4dd5baadf6 |
| 934d995661 |
| ccdc01ed7b |
| d84099e87a |
| cf55f7eb43 |
| 9293c7b58d |
| a883bb30d4 |
| e476bbde2d |
| 7f3dcdf02f |
| 132e79df89 |
| b2ed9ee221 |
| def2d3d188 |
@@ -72,6 +72,11 @@ provider/vercel:
      - any-glob-to-any-file: "prowler/providers/vercel/**"
      - any-glob-to-any-file: "tests/providers/vercel/**"

provider/okta:
  - changed-files:
      - any-glob-to-any-file: "prowler/providers/okta/**"
      - any-glob-to-any-file: "tests/providers/okta/**"

github_actions:
  - changed-files:
      - any-glob-to-any-file: ".github/workflows/*"
@@ -109,6 +114,8 @@ mutelist:
      - any-glob-to-any-file: "tests/providers/googleworkspace/lib/mutelist/**"
      - any-glob-to-any-file: "prowler/providers/vercel/lib/mutelist/**"
      - any-glob-to-any-file: "tests/providers/vercel/lib/mutelist/**"
      - any-glob-to-any-file: "prowler/providers/okta/lib/mutelist/**"
      - any-glob-to-any-file: "tests/providers/okta/lib/mutelist/**"

integration/s3:
  - changed-files:
@@ -36,6 +36,7 @@ Please add a detailed description of how to review this PR.

#### UI

- [ ] All issue/task requirements work as expected on the UI
- [ ] If this PR adds or updates npm dependencies, include package-health evidence (maintenance, popularity, known vulnerabilities, license, release age) and explain why existing/native alternatives are insufficient.
- [ ] Screenshots/Video of the functionality flow (if applicable) - Mobile (X < 640px)
- [ ] Screenshots/Video of the functionality flow (if applicable) - Tablet (640px < X < 1024px)
- [ ] Screenshots/Video of the functionality flow (if applicable) - Desktop (X > 1024px)
@@ -324,6 +324,30 @@ jobs:
          flags: prowler-py${{ matrix.python-version }}-github
          files: ./github_coverage.xml

      # Okta Provider
      - name: Check if Okta files changed
        if: steps.check-changes.outputs.any_changed == 'true'
        id: changed-okta
        uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
        with:
          files: |
            ./prowler/**/okta/**
            ./tests/**/okta/**
            ./poetry.lock

      - name: Run Okta tests
        if: steps.changed-okta.outputs.any_changed == 'true'
        run: poetry run pytest -n auto --cov=./prowler/providers/okta --cov-report=xml:okta_coverage.xml tests/providers/okta

      - name: Upload Okta coverage to Codecov
        if: steps.changed-okta.outputs.any_changed == 'true'
        uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prowler-py${{ matrix.python-version }}-okta
          files: ./okta_coverage.xml

      # NHN Provider
      - name: Check if NHN files changed
        if: steps.check-changes.outputs.any_changed == 'true'
@@ -132,6 +132,10 @@ jobs:
        if: steps.check-changes.outputs.any_changed == 'true'
        run: pnpm run healthcheck

      - name: Run pnpm audit
        if: steps.check-changes.outputs.any_changed == 'true'
        run: pnpm run audit

      - name: Run unit tests (all - critical paths changed)
        if: steps.check-changes.outputs.any_changed == 'true' && steps.critical-changes.outputs.any_changed == 'true'
        run: |
@@ -15,7 +15,7 @@ Use these skills for detailed patterns on-demand:
|-------|-------------|-----|
| `typescript` | Const types, flat interfaces, utility types | [SKILL.md](skills/typescript/SKILL.md) |
| `react-19` | No useMemo/useCallback, React Compiler | [SKILL.md](skills/react-19/SKILL.md) |
| `nextjs-15` | App Router, Server Actions, streaming | [SKILL.md](skills/nextjs-15/SKILL.md) |
| `nextjs-16` | App Router, Server Actions, proxy.ts, streaming | [SKILL.md](skills/nextjs-16/SKILL.md) |
| `tailwind-4` | cn() utility, no var() in className | [SKILL.md](skills/tailwind-4/SKILL.md) |
| `playwright` | Page Object Model, MCP workflow, selectors | [SKILL.md](skills/playwright/SKILL.md) |
| `pytest` | Fixtures, mocking, markers, parametrize | [SKILL.md](skills/pytest/SKILL.md) |
@@ -60,11 +60,14 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
|--------|-------|
| Add changelog entry for a PR or feature | `prowler-changelog` |
| Adding DRF pagination or permissions | `django-drf` |
| Adding a compliance output formatter (per-provider class + table dispatcher) | `prowler-compliance` |
| Adding indexes or constraints to database tables | `django-migration-psql` |
| Adding new providers | `prowler-provider` |
| Adding privilege escalation detection queries | `prowler-attack-paths-query` |
| Adding services to existing providers | `prowler-provider` |
| After creating/modifying a skill | `skill-sync` |
| App Router / Server Actions | `nextjs-15` |
| App Router / Server Actions | `nextjs-16` |
| Auditing check-to-requirement mappings as a cloud auditor | `prowler-compliance` |
| Building AI chat features | `ai-sdk-5` |
| Committing changes | `prowler-commit` |
| Configuring MCP servers in agentic workflows | `gh-aw` |
@@ -78,6 +81,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Creating a git commit | `prowler-commit` |
| Creating new checks | `prowler-sdk-check` |
| Creating new skills | `skill-creator` |
| Creating or reviewing Django migrations | `django-migration-psql` |
| Creating/modifying Prowler UI components | `prowler-ui` |
| Creating/modifying models, views, serializers | `prowler-api` |
| Creating/updating compliance frameworks | `prowler-compliance` |
@@ -85,6 +89,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Debugging gh-aw compilation errors | `gh-aw` |
| Fill .github/pull_request_template.md (Context/Description/Steps to review/Checklist) | `prowler-pr` |
| Fixing bug | `tdd` |
| Fixing compliance JSON bugs (duplicate IDs, empty Section, stale refs) | `prowler-compliance` |
| General Prowler development questions | `prowler` |
| Implementing JSON:API endpoints | `django-drf` |
| Implementing feature | `tdd` |
@@ -102,6 +107,8 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Review changelog format and conventions | `prowler-changelog` |
| Reviewing JSON:API compliance | `jsonapi` |
| Reviewing compliance framework PRs | `prowler-compliance-review` |
| Running makemigrations or pgmakemigrations | `django-migration-psql` |
| Syncing compliance framework with upstream catalog | `prowler-compliance` |
| Testing RLS tenant isolation | `prowler-test-api` |
| Testing hooks or utilities | `vitest` |
| Troubleshoot why a skill is missing from AGENTS.md auto-invoke | `skill-sync` |
@@ -129,6 +136,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Writing React components | `react-19` |
| Writing TypeScript types/interfaces | `typescript` |
| Writing Vitest tests | `vitest` |
| Writing data backfill or data migration | `django-migration-psql` |
| Writing documentation | `prowler-docs` |
| Writing unit tests for UI | `vitest` |
@@ -142,7 +150,7 @@ Prowler is an open-source cloud security assessment tool supporting AWS, Azure,
|-----------|----------|------------|
| SDK | `prowler/` | Python 3.10+, Poetry 2.3+ |
| API | `api/` | Django 5.1, DRF, Celery |
| UI | `ui/` | Next.js 15, React 19, Tailwind 4 |
| UI | `ui/` | Next.js 16, React 19, Tailwind 4 |
| MCP Server | `mcp_server/` | FastMCP, Python 3.12+ |
| Dashboard | `dashboard/` | Dash, Plotly |
@@ -117,9 +117,11 @@ Every AWS provider scan will enqueue an Attack Paths ingestion job automatically
| MongoDB Atlas | 10 | 3 | 0 | 8 | Official | UI, API, CLI |
| LLM | [See `promptfoo` docs.](https://www.promptfoo.dev/docs/red-team/plugins/) | N/A | N/A | N/A | Official | CLI |
| Image | N/A | N/A | N/A | N/A | Official | CLI, API |
| Google Workspace | 25 | 4 | 2 | 4 | Official | CLI |
| Google Workspace | 25 | 4 | 2 | 4 | Official | UI, API, CLI |
| OpenStack | 34 | 5 | 0 | 9 | Official | UI, API, CLI |
| Vercel | 26 | 6 | 0 | 5 | Official | CLI |
| Vercel | 26 | 6 | 0 | 5 | Official | UI, API, CLI |
| Okta | 1 | 1 | 0 | 1 | Official | CLI |
| Scaleway [Contact us](https://prowler.com/contact) | 1 | 1 | 0 | 1 | Unofficial | CLI |
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |

> [!NOTE]
@@ -14,6 +14,14 @@ All notable changes to the **Prowler API** are documented in this file.

---

## [1.27.2] (Prowler UNRELEASED)

### 🐞 Fixed

- Attack Paths: BEDROCK-001 and BEDROCK-002 now target roles trusting `bedrock-agentcore.amazonaws.com` instead of `bedrock.amazonaws.com`, eliminating false positives against regular Bedrock service roles (Agents, Knowledge Bases, model invocation) [(#11141)](https://github.com/prowler-cloud/prowler/pull/11141)

---

## [1.27.1] (Prowler v5.26.1)

### 🐞 Fixed
@@ -484,8 +484,8 @@ AWS_BEDROCK_PRIVESC_PASSROLE_CODE_INTERPRETER = AttackPathsQueryDefinition(
            OR action = '*'
        )

        // Find roles that trust Bedrock service (can be passed to Bedrock)
        MATCH path_target = (aws)--(target_role:AWSRole)-[:TRUSTS_AWS_PRINCIPAL]->(:AWSPrincipal {{arn: 'bedrock.amazonaws.com'}})
        // Find roles that trust the Bedrock AgentCore service (can be passed to a code interpreter)
        MATCH path_target = (aws)--(target_role:AWSRole)-[:TRUSTS_AWS_PRINCIPAL]->(:AWSPrincipal {{arn: 'bedrock-agentcore.amazonaws.com'}})
        WHERE any(resource IN stmt_passrole.resource WHERE
            resource = '*'
            OR target_role.arn CONTAINS resource
@@ -536,8 +536,8 @@ AWS_BEDROCK_PRIVESC_INVOKE_CODE_INTERPRETER = AttackPathsQueryDefinition(
            OR action = '*'
        )

        // Find roles that trust Bedrock service (already attached to existing code interpreters)
        MATCH path_target = (aws)--(target_role:AWSRole)-[:TRUSTS_AWS_PRINCIPAL]->(:AWSPrincipal {{arn: 'bedrock.amazonaws.com'}})
        // Find roles that trust the Bedrock AgentCore service (already attached to existing code interpreters)
        MATCH path_target = (aws)--(target_role:AWSRole)-[:TRUSTS_AWS_PRINCIPAL]->(:AWSPrincipal {{arn: 'bedrock-agentcore.amazonaws.com'}})

        WITH collect(path_principal) + collect(path_target) AS paths
        UNWIND paths AS p
@@ -0,0 +1,335 @@
# AWS Inventory Connectivity Graph

A community-contributed tool that generates interactive connectivity graphs from Prowler AWS scans, visualizing relationships between AWS resources with zero additional API calls.

## Overview

This tool extends Prowler by producing two artifacts after a scan completes:

- **`<output>.inventory.json`** – Machine-readable graph (nodes + edges)
- **`<output>.inventory.html`** – Interactive D3.js force-directed visualization

### Why?

Prowler's existing outputs (CSV, ASFF, OCSF, HTML) report individual check findings but provide no cross-service topology view. Security engineers need to understand **how** resources are connected—which Lambda functions sit inside which VPC, which IAM roles can be assumed by which services, which event sources trigger which functions—before they can reason about attack paths, blast radius, or lateral-movement risk.

This tool fills that gap by building a connectivity graph from the service clients that are already loaded during a Prowler scan.
## Features

### Supported AWS Services

The tool currently extracts connectivity information from:

- **Lambda** – Functions, VPC/subnet/SG edges, event source mappings, layers, DLQ, KMS
- **EC2** – Instances, security groups, subnet/VPC edges
- **VPC** – VPCs, subnets, peering connections
- **RDS** – DB instances, VPC/SG/cluster/KMS edges
- **ELBv2** – ALB/NLB load balancers, SG and VPC edges
- **S3** – Buckets, replication targets, logging buckets, KMS keys
- **IAM** – Roles, trust-relationship edges (who can assume what)

### Edge Semantic Types

Edges are typed for downstream filtering and attack-path analysis (see the filtering sketch after this list):

- `network` – Resources share a network path (VPC/subnet/SG)
- `iam` – IAM trust or permission relationship
- `triggers` – One resource can invoke another (event source → Lambda)
- `data_flow` – Data is written/read (Lambda → SQS dead-letter queue)
- `depends_on` – Soft dependency (Lambda layer, subnet belongs to VPC)
- `routes_to` – Traffic routing (LB → target)
- `replicates_to` – S3 replication
- `encrypts` – KMS key encrypts the resource
- `logs_to` – Logging relationship
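For example, a downstream consumer can slice the JSON output by `edge_type`. A minimal stdlib-only sketch (the output filename is a placeholder for whatever the generator produced):

```python
import json

# Load the machine-readable graph produced by the tool
with open("output/my-aws-inventory.inventory.json") as fh:
    graph = json.load(fh)

# Keep only IAM trust relationships, e.g. as attack-path candidates
iam_edges = [e for e in graph["edges"] if e["edge_type"] == "iam"]
for edge in iam_edges:
    print(f"{edge['source_id']} --[{edge['label']}]--> {edge['target_id']}")
```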
### Interactive HTML Graph Features

- Force-directed layout with drag-and-drop node pinning
- Zoom / pan (mouse wheel + click-drag on background)
- Per-service color-coded nodes with a legend
- Hover tooltips showing ARN + all metadata properties
- Service filter dropdown (show only Lambda, EC2, RDS, etc.)
- Adjustable link-distance and charge-strength physics sliders
- Edge labels on every arrow

## Installation

### Prerequisites

- Python 3.9.1 or higher
- Prowler installed and configured (see [Prowler documentation](https://docs.prowler.com/))

### Setup

1. Clone or download this directory to your local machine
2. Ensure Prowler is installed and working
3. No additional dependencies are required beyond Prowler's existing requirements
## Usage

### Basic Usage

Run Prowler with your desired checks, then use the inventory graph script:

```bash
# Run Prowler scan (example)
prowler aws --output-formats csv

# Generate inventory graph from the scan
python contrib/inventory-graph/inventory_graph.py --output-directory ./output
```

### Command-Line Options

```bash
python contrib/inventory-graph/inventory_graph.py [OPTIONS]

Options:
  --output-directory DIR   Directory to save output files (default: ./output)
  --output-filename NAME   Base filename without extension (default: prowler-inventory-<timestamp>)
  --help                   Show this help message and exit
```

### Example Workflow

```bash
# 1. Run a Prowler scan on your AWS account
prowler aws --profile my-aws-profile --output-formats csv html

# 2. Generate the inventory graph
python contrib/inventory-graph/inventory_graph.py \
    --output-directory ./output \
    --output-filename my-aws-inventory

# 3. Open the HTML file in your browser
open output/my-aws-inventory.inventory.html
```

### Integration with Prowler Scans

The tool reads from already-loaded AWS service clients in memory (via `sys.modules`); a minimal sketch of that lookup follows the list. This means:

- **Zero extra AWS API calls** – Uses data already collected during the Prowler scan
- **Graceful degradation** – Services not scanned are silently skipped
- **Flexible** – Works with any subset of Prowler checks
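Under the hood this is plain `sys.modules` inspection; a sketch of the lookup performed for a single service (module and attribute names taken from the registry in `lib/graph_builder.py`):

```python
import sys

# The client module only exists in sys.modules if the Prowler scan
# actually imported (i.e. scanned) the corresponding service.
module = sys.modules.get("prowler.providers.aws.services.s3.s3_client")
if module is None:
    print("S3 was not scanned in this session; skipping")
else:
    # The singleton client instance that Prowler's checks share
    s3_client = getattr(module, "s3_client", None)
```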
## Output Files

### JSON Output (`*.inventory.json`)

Machine-readable graph structure:

```json
{
  "generated_at": "2026-03-19T12:34:56Z",
  "nodes": [
    {
      "id": "arn:aws:lambda:us-east-1:123456789012:function:my-function",
      "type": "lambda_function",
      "name": "my-function",
      "service": "lambda",
      "region": "us-east-1",
      "account_id": "123456789012",
      "properties": {
        "runtime": "python3.9",
        "vpc_id": "vpc-abc123"
      }
    }
  ],
  "edges": [
    {
      "source_id": "arn:aws:lambda:...",
      "target_id": "arn:aws:ec2:...:vpc/vpc-abc123",
      "edge_type": "network",
      "label": "in-vpc"
    }
  ],
  "stats": {
    "node_count": 42,
    "edge_count": 87
  }
}
```
### HTML Output (`*.inventory.html`)

Single-file interactive visualization that opens in any modern browser. No server or build step is required (D3.js is loaded from a CDN, so internet access is needed).

## Architecture

### Design Decisions

| Decision | Rationale |
|----------|-----------|
| **Read from sys.modules** | Zero extra AWS API calls; services not scanned are silently skipped |
| **Self-contained HTML** | D3.js v7 via CDN; no server, no build step; opens in any browser |
| **One extractor per service** | Each extractor is independently testable; adding a new service = one new file + one line in the registry |
| **Typed edges** | Semantic types allow downstream consumers (attack-path tools, Neo4j import) to filter by relationship class |
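As a concrete illustration of that last row, the typed edges map naturally onto a graph database. A rough sketch of loading the JSON into Neo4j with the official Python driver (the URI, credentials, and the `:Resource`/`:CONNECTED` schema are placeholder assumptions, not part of this tool):

```python
import json

from neo4j import GraphDatabase  # assumes `pip install neo4j`

with open("output/my-aws-inventory.inventory.json") as fh:
    graph = json.load(fh)

driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
with driver.session() as session:
    for node in graph["nodes"]:
        session.run(
            "MERGE (r:Resource {id: $id}) SET r.type = $type, r.name = $name",
            id=node["id"], type=node["type"], name=node["name"],
        )
    for edge in graph["edges"]:
        # Relationship types cannot be parameterised in Cypher, so the
        # semantic edge type is stored as a property instead
        session.run(
            "MATCH (a:Resource {id: $src}), (b:Resource {id: $dst}) "
            "MERGE (a)-[:CONNECTED {edge_type: $et, label: $label}]->(b)",
            src=edge["source_id"], dst=edge["target_id"],
            et=edge["edge_type"], label=edge["label"],
        )
driver.close()
```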
### Project Structure

```
contrib/inventory-graph/
├── README.md                    # This file
├── inventory_graph.py           # Main entry point script
├── lib/
│   ├── __init__.py
│   ├── models.py                # ResourceNode, ResourceEdge, ConnectivityGraph dataclasses
│   ├── graph_builder.py         # Reads loaded service clients from sys.modules
│   ├── inventory_output.py      # write_json(), write_html()
│   └── extractors/
│       ├── __init__.py
│       ├── lambda_extractor.py  # Lambda functions → VPC/subnet/SG/event-sources/layers/DLQ/KMS
│       ├── ec2_extractor.py     # EC2 instances + security groups → subnet/VPC
│       ├── vpc_extractor.py     # VPCs, subnets, peering connections
│       ├── rds_extractor.py     # RDS instances → VPC/SG/cluster/KMS
│       ├── elbv2_extractor.py   # ALB/NLB load balancers → SG/VPC
│       ├── s3_extractor.py      # S3 buckets → replication targets/logging buckets/KMS keys
│       └── iam_extractor.py     # IAM roles + trust-relationship edges
└── examples/
    └── sample_output.html       # Example output (optional)
```

## Testing

### Smoke Test (No AWS Credentials Needed)

```python
import sys
from unittest.mock import MagicMock

# Wire a fake Lambda client
mock_module = MagicMock()
mock_fn = MagicMock()
mock_fn.arn = "arn:aws:lambda:us-east-1:123:function:test"
mock_fn.name = "test"
mock_fn.region = "us-east-1"
mock_fn.vpc_id = "vpc-abc"
mock_fn.security_groups = ["sg-111"]
mock_fn.subnet_ids = {"subnet-aaa"}
mock_fn.environment = None
mock_fn.kms_key_arn = None
mock_fn.layers = []
mock_fn.dead_letter_config = None
mock_fn.event_source_mappings = []
mock_module.awslambda_client.functions = {mock_fn.arn: mock_fn}
mock_module.awslambda_client.audited_account = "123"
sys.modules["prowler.providers.aws.services.awslambda.awslambda_client"] = mock_module

# The tool's directory name contains a hyphen, so it cannot be imported as a
# package; add it to sys.path and import the lib modules directly instead
sys.path.insert(0, "contrib/inventory-graph")

from lib.graph_builder import build_graph
from lib.inventory_output import write_json, write_html

graph = build_graph()
write_json(graph, "/tmp/test.inventory.json")
write_html(graph, "/tmp/test.inventory.html")
# Open /tmp/test.inventory.html in a browser
```
## Extending

### Adding a New Service

1. Create a new extractor file in `lib/extractors/` (e.g., `dynamodb_extractor.py`)
2. Implement the `extract(client)` function that returns `(nodes, edges)`
3. Register it in `lib/graph_builder.py` in the `_SERVICE_REGISTRY` tuple (see the registry sketch after the template below)

Example extractor template:

```python
from typing import List, Tuple

from lib.models import ResourceEdge, ResourceNode


def extract(client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """Extract DynamoDB tables and their relationships."""
    nodes = []
    edges = []

    for table in client.tables:
        nodes.append(
            ResourceNode(
                id=table.arn,
                type="dynamodb_table",
                name=table.name,
                service="dynamodb",
                region=table.region,
                account_id=client.audited_account,
                properties={"billing_mode": table.billing_mode},
            )
        )

        # Add edges for KMS encryption, streams, etc.
        if table.kms_key_arn:
            edges.append(
                ResourceEdge(
                    source_id=table.kms_key_arn,
                    target_id=table.arn,
                    edge_type="encrypts",
                    label="encrypts",
                )
            )

    return nodes, edges
```
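For step 3, each registry entry is a `(client module path, client attribute, extractor module path)` triple; a hypothetical entry for the DynamoDB extractor above would look like:

```python
# lib/graph_builder.py: hypothetical new entry in _SERVICE_REGISTRY
(
    "prowler.providers.aws.services.dynamodb.dynamodb_client",
    "dynamodb_client",
    "lib.extractors.dynamodb_extractor",
),
```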
## Troubleshooting

### No nodes discovered

**Problem:** The tool reports "no nodes discovered" after running.

**Solution:** Ensure you've run a Prowler scan first. The tool reads from in-memory service clients loaded during the scan. If no services were scanned, no nodes will be discovered.

### Missing services in the graph

**Problem:** Some AWS services are not appearing in the graph.

**Solution:** The tool only includes services that have been scanned by Prowler. Run Prowler with the services you want to include, or run without service filters to scan all available services.

### HTML file doesn't display properly

**Problem:** The HTML visualization doesn't load or shows errors.

**Solution:**

- Ensure you're opening the file in a modern browser (Chrome, Firefox, Safari, Edge)
- Check your browser's console for JavaScript errors
- Verify the file was generated completely (check file size > 0)
- The HTML requires internet access to load D3.js from CDN

## Roadmap

Potential future enhancements:

- [ ] Support for additional AWS services (DynamoDB, SQS, SNS, etc.)
- [ ] Export to Neo4j / Cartography format
- [ ] Attack path analysis integration
- [ ] Multi-account/multi-region aggregation
- [ ] Custom edge type filtering in HTML UI
- [ ] Graph diff between two scans

## Contributing

This is a community contribution. If you'd like to enhance it:

1. Fork the Prowler repository
2. Make your changes in `contrib/inventory-graph/`
3. Test thoroughly
4. Submit a pull request with a clear description

## License

This tool is part of the Prowler project and is licensed under the Apache License 2.0.

## Credits

- **Author:** [@sandiyochristan](https://github.com/sandiyochristan)
- **Related PR:** [#10382](https://github.com/prowler-cloud/prowler/pull/10382)
- **Prowler Project:** [prowler-cloud/prowler](https://github.com/prowler-cloud/prowler)

## Support

For issues or questions:

- Open an issue in the [Prowler repository](https://github.com/prowler-cloud/prowler/issues)
- Join the [Prowler Community Slack](https://goto.prowler.com/slack)
- Tag your issue with `contrib:inventory-graph`
@@ -0,0 +1,181 @@
#!/usr/bin/env python3
"""
Example: Generate AWS Inventory Graph with Mock Data

This example demonstrates how to use the inventory graph tool with mock AWS data.
No AWS credentials required.
"""

import sys
from pathlib import Path
from unittest.mock import MagicMock

# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from lib.graph_builder import build_graph
from lib.inventory_output import write_json, write_html


def create_mock_lambda_client():
    """Create a mock Lambda client with sample data."""
    mock_module = MagicMock()

    # Create a mock Lambda function
    mock_fn = MagicMock()
    mock_fn.arn = "arn:aws:lambda:us-east-1:123456789012:function:my-test-function"
    mock_fn.name = "my-test-function"
    mock_fn.region = "us-east-1"
    mock_fn.vpc_id = "vpc-abc123"
    mock_fn.security_groups = ["sg-111222"]
    mock_fn.subnet_ids = {"subnet-aaa111", "subnet-bbb222"}
    mock_fn.environment = {"Variables": {"ENV": "production"}}
    mock_fn.kms_key_arn = (
        "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
    )
    mock_fn.layers = []
    mock_fn.dead_letter_config = None
    mock_fn.event_source_mappings = []

    mock_module.awslambda_client.functions = {mock_fn.arn: mock_fn}
    mock_module.awslambda_client.audited_account = "123456789012"

    return mock_module


def create_mock_ec2_client():
    """Create a mock EC2 client with sample data."""
    mock_module = MagicMock()

    # Create a mock EC2 instance
    mock_instance = MagicMock()
    mock_instance.arn = (
        "arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0"
    )
    mock_instance.id = "i-1234567890abcdef0"
    mock_instance.region = "us-east-1"
    mock_instance.vpc_id = "vpc-abc123"
    mock_instance.subnet_id = "subnet-aaa111"
    # The extractor uses security-group IDs directly as edge targets
    mock_instance.security_groups = ["sg-111222"]
    mock_instance.state = "running"
    mock_instance.type = "t3.micro"
    mock_instance.tags = [{"Key": "Name", "Value": "test-instance"}]

    # Create a mock security group
    mock_sg = MagicMock()
    mock_sg.arn = "arn:aws:ec2:us-east-1:123456789012:security-group/sg-111222"
    mock_sg.id = "sg-111222"
    mock_sg.name = "test-security-group"
    mock_sg.region = "us-east-1"
    mock_sg.vpc_id = "vpc-abc123"

    mock_module.ec2_client.instances = [mock_instance]
    # The extractor iterates security_groups.values(), so this must be a dict
    mock_module.ec2_client.security_groups = {mock_sg.id: mock_sg}
    mock_module.ec2_client.audited_account = "123456789012"

    return mock_module


def create_mock_vpc_client():
    """Create a mock VPC client with sample data."""
    mock_module = MagicMock()

    # Create a mock VPC
    mock_vpc = MagicMock()
    mock_vpc.arn = "arn:aws:ec2:us-east-1:123456789012:vpc/vpc-abc123"
    mock_vpc.id = "vpc-abc123"
    mock_vpc.region = "us-east-1"
    mock_vpc.cidr_block = "10.0.0.0/16"
    mock_vpc.tags = [{"Key": "Name", "Value": "test-vpc"}]

    # Create mock subnets
    mock_subnet1 = MagicMock()
    mock_subnet1.arn = "arn:aws:ec2:us-east-1:123456789012:subnet/subnet-aaa111"
    mock_subnet1.id = "subnet-aaa111"
    mock_subnet1.region = "us-east-1"
    mock_subnet1.vpc_id = "vpc-abc123"
    mock_subnet1.cidr_block = "10.0.1.0/24"
    mock_subnet1.availability_zone = "us-east-1a"

    mock_subnet2 = MagicMock()
    mock_subnet2.arn = "arn:aws:ec2:us-east-1:123456789012:subnet/subnet-bbb222"
    mock_subnet2.id = "subnet-bbb222"
    mock_subnet2.region = "us-east-1"
    mock_subnet2.vpc_id = "vpc-abc123"
    mock_subnet2.cidr_block = "10.0.2.0/24"
    mock_subnet2.availability_zone = "us-east-1b"

    # The extractor iterates vpcs.values() and vpc_subnets.values(), and calls
    # .values() on vpc_peering_connections, so these must be dicts
    mock_module.vpc_client.vpcs = {mock_vpc.id: mock_vpc}
    mock_module.vpc_client.vpc_subnets = {
        mock_subnet1.id: mock_subnet1,
        mock_subnet2.id: mock_subnet2,
    }
    mock_module.vpc_client.vpc_peering_connections = {}
    mock_module.vpc_client.audited_account = "123456789012"

    return mock_module


def main():
    """Main function to demonstrate the inventory graph generation."""
    print("=" * 70)
    print("AWS Inventory Graph - Mock Data Example")
    print("=" * 70)
    print()

    # Create mock clients and inject them into sys.modules
    print("Creating mock AWS service clients...")
    sys.modules["prowler.providers.aws.services.awslambda.awslambda_client"] = (
        create_mock_lambda_client()
    )
    sys.modules["prowler.providers.aws.services.ec2.ec2_client"] = (
        create_mock_ec2_client()
    )
    sys.modules["prowler.providers.aws.services.vpc.vpc_client"] = (
        create_mock_vpc_client()
    )
    print("✓ Mock clients created")
    print()

    # Build the graph
    print("Building connectivity graph...")
    graph = build_graph()
    print(f"✓ Graph built: {len(graph.nodes)} nodes, {len(graph.edges)} edges")
    print()

    # Display discovered nodes
    print("Discovered nodes:")
    for node in graph.nodes:
        print(f"  - {node.type}: {node.name} ({node.region})")
    print()

    # Display discovered edges
    print("Discovered edges:")
    for edge in graph.edges:
        source_node = next((n for n in graph.nodes if n.id == edge.source_id), None)
        target_node = next((n for n in graph.nodes if n.id == edge.target_id), None)
        source_name = source_node.name if source_node else edge.source_id
        target_name = target_node.name if target_node else edge.target_id
        print(f"  - {source_name} --[{edge.edge_type}]--> {target_name}")
    print()

    # Write outputs
    output_dir = Path(__file__).parent
    json_path = output_dir / "example_output.inventory.json"
    html_path = output_dir / "example_output.inventory.html"

    print("Writing output files...")
    write_json(graph, str(json_path))
    write_html(graph, str(html_path))
    print(f"✓ JSON written to: {json_path}")
    print(f"✓ HTML written to: {html_path}")
    print()

    print("=" * 70)
    print("✓ Example complete!")
    print("=" * 70)
    print()
    print("Open the HTML file to view the interactive graph:")
    print(f"  open {html_path}")
    print()


if __name__ == "__main__":
    main()
@@ -0,0 +1,158 @@
#!/usr/bin/env python3
"""
AWS Inventory Connectivity Graph Generator

A standalone tool that generates interactive connectivity graphs from Prowler AWS scans.
This tool reads from already-loaded AWS service clients in memory and produces:
- JSON graph (nodes + edges)
- Interactive HTML visualization

Usage:
    python inventory_graph.py --output-directory ./output --output-filename my-inventory

For more information, see README.md
"""

import argparse
import sys
from datetime import datetime
from pathlib import Path

# Add the contrib directory to the path so we can import the lib modules
CONTRIB_DIR = Path(__file__).parent
sys.path.insert(0, str(CONTRIB_DIR))

from lib.graph_builder import build_graph
from lib.inventory_output import write_json, write_html


def parse_arguments():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(
        description="Generate AWS inventory connectivity graph from Prowler scan data",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Generate graph with default settings
  python inventory_graph.py

  # Specify custom output directory and filename
  python inventory_graph.py --output-directory ./my-output --output-filename aws-inventory

  # After running a Prowler scan
  prowler aws --profile my-profile
  python inventory_graph.py --output-directory ./output

For more information, see README.md
""",
    )

    parser.add_argument(
        "--output-directory",
        "-o",
        default="./output",
        help="Directory to save output files (default: ./output)",
    )

    parser.add_argument(
        "--output-filename",
        "-f",
        default=None,
        help="Base filename without extension (default: prowler-inventory-<timestamp>)",
    )

    parser.add_argument(
        "--verbose",
        "-v",
        action="store_true",
        help="Enable verbose output",
    )

    return parser.parse_args()


def main():
    """Main entry point for the inventory graph generator."""
    args = parse_arguments()

    # Set up output paths
    output_dir = Path(args.output_directory)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Generate filename with timestamp if not provided
    if args.output_filename:
        base_filename = args.output_filename
    else:
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        base_filename = f"prowler-inventory-{timestamp}"

    json_path = output_dir / f"{base_filename}.inventory.json"
    html_path = output_dir / f"{base_filename}.inventory.html"

    print("=" * 70)
    print("AWS Inventory Connectivity Graph Generator")
    print("=" * 70)
    print()

    # Build the graph from loaded service clients
    if args.verbose:
        print("Building connectivity graph from loaded AWS service clients...")

    graph = build_graph()

    # Check if any nodes were discovered
    if not graph.nodes:
        print("⚠️  WARNING: No nodes discovered!")
        print()
        print("This usually means:")
        print("  1. No Prowler scan has been run yet in this Python session")
        print("  2. No AWS service clients are loaded in memory")
        print()
        print("To fix this:")
        print("  1. Run a Prowler scan first: prowler aws --output-formats csv")
        print("  2. Then run this script in the same session")
        print()
        print(
            "Alternatively, integrate this tool directly into Prowler's output pipeline."
        )
        sys.exit(1)

    print(f"✓ Discovered {len(graph.nodes)} nodes and {len(graph.edges)} edges")
    print()

    # Write outputs
    if args.verbose:
        print(f"Writing JSON output to: {json_path}")
    write_json(graph, str(json_path))

    if args.verbose:
        print(f"Writing HTML output to: {html_path}")
    write_html(graph, str(html_path))

    print()
    print("=" * 70)
    print("✓ Graph generation complete!")
    print("=" * 70)
    print()
    print(f"📄 JSON: {json_path}")
    print(f"🌐 HTML: {html_path}")
    print()
    print("Open the HTML file in your browser to explore the interactive graph:")
    print(f"  open {html_path}")
    print()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n\nInterrupted by user. Exiting...")
        sys.exit(130)
    except Exception as e:
        print(f"\n❌ Error: {e}", file=sys.stderr)
        if "--verbose" in sys.argv or "-v" in sys.argv:
            import traceback

            traceback.print_exc()
        sys.exit(1)
@@ -0,0 +1,94 @@
from typing import List, Tuple

from lib.models import ResourceEdge, ResourceNode


def extract(client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """
    Extract EC2 instance and security-group nodes with their edges.

    Edges produced:
      - instance → security-group  [network]
      - instance → subnet          [network]
      - security-group → VPC       [network]
    """
    nodes: List[ResourceNode] = []
    edges: List[ResourceEdge] = []

    # EC2 Instances
    for instance in client.instances:
        # Prefer the Name tag over the raw instance ID
        name = instance.id
        for tag in instance.tags or []:
            if tag.get("Key") == "Name":
                name = tag["Value"]
                break

        props = {
            "instance_type": getattr(instance, "type", None),
            "state": getattr(instance, "state", None),
            "vpc_id": getattr(instance, "vpc_id", None),
            "subnet_id": getattr(instance, "subnet_id", None),
            "public_ip": getattr(instance, "public_ip_address", None),
            "private_ip": getattr(instance, "private_ip_address", None),
        }

        nodes.append(
            ResourceNode(
                id=instance.arn,
                type="ec2_instance",
                name=name,
                service="ec2",
                region=instance.region,
                account_id=client.audited_account,
                properties={k: v for k, v in props.items() if v is not None},
            )
        )

        for sg_id in instance.security_groups or []:
            edges.append(
                ResourceEdge(
                    source_id=instance.arn,
                    target_id=sg_id,
                    edge_type="network",
                    label="sg",
                )
            )

        if instance.subnet_id:
            edges.append(
                ResourceEdge(
                    source_id=instance.arn,
                    target_id=instance.subnet_id,
                    edge_type="network",
                    label="subnet",
                )
            )

    # Security Groups
    for sg in client.security_groups.values():
        name = (
            sg.name if hasattr(sg, "name") else sg.id if hasattr(sg, "id") else sg.arn
        )
        nodes.append(
            ResourceNode(
                id=sg.arn,
                type="security_group",
                name=name,
                service="ec2",
                region=sg.region,
                account_id=client.audited_account,
                properties={"vpc_id": sg.vpc_id},
            )
        )

        if sg.vpc_id:
            edges.append(
                ResourceEdge(
                    source_id=sg.arn,
                    target_id=sg.vpc_id,
                    edge_type="network",
                    label="in-vpc",
                )
            )

    return nodes, edges
@@ -0,0 +1,60 @@
from typing import List, Tuple

from lib.models import ResourceEdge, ResourceNode


def extract(client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """
    Extract ELBv2 (ALB/NLB) load balancer nodes and their edges.

    Edges produced:
      - load_balancer → security-group  [network]
      - load_balancer → VPC             [network]
    """
    nodes: List[ResourceNode] = []
    edges: List[ResourceEdge] = []

    for lb in client.loadbalancersv2.values():
        props = {
            "type": getattr(lb, "type", None),
            "scheme": getattr(lb, "scheme", None),
            "dns_name": getattr(lb, "dns", None),
            "vpc_id": getattr(lb, "vpc_id", None),
        }

        # Fall back to the name segment of the ARN when no name attribute exists
        name = getattr(lb, "name", lb.arn.split("/")[-2] if "/" in lb.arn else lb.arn)

        nodes.append(
            ResourceNode(
                id=lb.arn,
                type="load_balancer",
                name=name,
                service="elbv2",
                region=lb.region,
                account_id=client.audited_account,
                properties={k: v for k, v in props.items() if v is not None},
            )
        )

        for sg_id in lb.security_groups or []:
            edges.append(
                ResourceEdge(
                    source_id=lb.arn,
                    target_id=sg_id,
                    edge_type="network",
                    label="sg",
                )
            )

        vpc_id = getattr(lb, "vpc_id", None)
        if vpc_id:
            edges.append(
                ResourceEdge(
                    source_id=lb.arn,
                    target_id=vpc_id,
                    edge_type="network",
                    label="in-vpc",
                )
            )

    return nodes, edges
@@ -0,0 +1,84 @@
import json
from typing import Any, Dict, List, Tuple

from prowler.lib.logger import logger
from lib.models import ResourceEdge, ResourceNode


def _parse_trust_principals(assume_role_policy: Any) -> List[str]:
    """
    Return a flat list of principal strings from an IAM assume-role policy document.
    The policy may be a dict already or a JSON string.
    """
    if not assume_role_policy:
        return []

    if isinstance(assume_role_policy, str):
        try:
            assume_role_policy = json.loads(assume_role_policy)
        except (json.JSONDecodeError, ValueError):
            return []

    principals = []
    for statement in assume_role_policy.get("Statement", []):
        principal = statement.get("Principal", {})
        if isinstance(principal, str):
            principals.append(principal)
        elif isinstance(principal, dict):
            for v in principal.values():
                if isinstance(v, list):
                    principals.extend(v)
                else:
                    principals.append(v)
        elif isinstance(principal, list):
            principals.extend(principal)

    return principals


def extract(client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """
    Extract IAM role nodes and their trust-relationship edges.

    Edges produced:
      - trusted-principal → role  [iam]  (who can assume this role)
    """
    nodes: List[ResourceNode] = []
    edges: List[ResourceEdge] = []

    for role in client.roles:
        props: Dict[str, Any] = {
            "path": getattr(role, "path", None),
            "create_date": str(getattr(role, "create_date", "") or ""),
        }

        nodes.append(
            ResourceNode(
                id=role.arn,
                type="iam_role",
                name=role.name,
                service="iam",
                region="global",
                account_id=client.audited_account,
                properties={k: v for k, v in props.items() if v},
            )
        )

        # Trust-relationship edges: principal → role (principal CAN assume role)
        try:
            for principal in _parse_trust_principals(role.assume_role_policy):
                if principal and principal != "*":
                    edges.append(
                        ResourceEdge(
                            source_id=principal,
                            target_id=role.arn,
                            edge_type="iam",
                            label="can-assume",
                        )
                    )
        except Exception as e:
            logger.debug(
                f"inventory iam_extractor: could not parse trust policy for {role.arn}: {e}"
            )

    return nodes, edges
@@ -0,0 +1,118 @@
from typing import List, Tuple

from lib.models import ResourceEdge, ResourceNode


def extract(client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """
    Extract Lambda function nodes and their edges from an awslambda_client.

    Edges produced:
      - lambda → VPC           [network]
      - lambda → subnet        [network]
      - lambda → sg            [network]
      - event-source → lambda  [triggers]    (from EventSourceMapping)
      - lambda → layer ARN     [depends_on]
      - lambda → DLQ target    [data_flow]
      - KMS key → lambda       [encrypts]
    """
    nodes: List[ResourceNode] = []
    edges: List[ResourceEdge] = []

    for fn in client.functions.values():
        props = {
            "runtime": fn.runtime,
            "vpc_id": fn.vpc_id,
        }
        if fn.environment:
            props["has_env_vars"] = True
        if fn.kms_key_arn:
            props["kms_key_arn"] = fn.kms_key_arn

        nodes.append(
            ResourceNode(
                id=fn.arn,
                type="lambda_function",
                name=fn.name,
                service="lambda",
                region=fn.region,
                account_id=client.audited_account,
                properties=props,
            )
        )

        # Network edges → VPC, subnets, security groups
        if fn.vpc_id:
            edges.append(
                ResourceEdge(
                    source_id=fn.arn,
                    target_id=fn.vpc_id,
                    edge_type="network",
                    label="in-vpc",
                )
            )
        for sg_id in fn.security_groups or []:
            edges.append(
                ResourceEdge(
                    source_id=fn.arn,
                    target_id=sg_id,
                    edge_type="network",
                    label="sg",
                )
            )
        for subnet_id in fn.subnet_ids or set():
            edges.append(
                ResourceEdge(
                    source_id=fn.arn,
                    target_id=subnet_id,
                    edge_type="network",
                    label="subnet",
                )
            )

        # Trigger edges from event source mappings
        for esm in getattr(fn, "event_source_mappings", []):
            edges.append(
                ResourceEdge(
                    source_id=esm.event_source_arn,
                    target_id=fn.arn,
                    edge_type="triggers",
                    label=f"esm:{esm.state}",
                )
            )

        # Layer dependency edges
        for layer in getattr(fn, "layers", []):
            edges.append(
                ResourceEdge(
                    source_id=fn.arn,
                    target_id=layer.arn,
                    edge_type="depends_on",
                    label="layer",
                )
            )

        # Dead-letter queue data-flow edge
        dlq = getattr(fn, "dead_letter_config", None)
        if dlq and dlq.target_arn:
            edges.append(
                ResourceEdge(
                    source_id=fn.arn,
                    target_id=dlq.target_arn,
                    edge_type="data_flow",
                    label="dlq",
                )
            )

        # KMS encryption edge
        if fn.kms_key_arn:
            edges.append(
                ResourceEdge(
                    source_id=fn.kms_key_arn,
                    target_id=fn.arn,
                    edge_type="encrypts",
                    label="kms",
                )
            )

    return nodes, edges
@@ -0,0 +1,86 @@
from typing import List, Tuple

from lib.models import ResourceEdge, ResourceNode


def extract(client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """
    Extract RDS DB instance nodes and their edges.

    Edges produced:
      - db_instance → security-group  [network]
      - db_instance → VPC             [network]
      - db_instance → cluster         [depends_on]
      - KMS key → db_instance         [encrypts]
    """
    nodes: List[ResourceNode] = []
    edges: List[ResourceEdge] = []

    for db in client.db_instances.values():
        props = {
            "engine": getattr(db, "engine", None),
            "engine_version": getattr(db, "engine_version", None),
            "instance_class": getattr(db, "db_instance_class", None),
            "vpc_id": getattr(db, "vpc_id", None),
            "multi_az": getattr(db, "multi_az", None),
            "publicly_accessible": getattr(db, "publicly_accessible", None),
            "storage_encrypted": getattr(db, "storage_encrypted", None),
        }

        nodes.append(
            ResourceNode(
                id=db.arn,
                type="rds_instance",
                name=db.id,
                service="rds",
                region=db.region,
                account_id=client.audited_account,
                properties={k: v for k, v in props.items() if v is not None},
            )
        )

        for sg in getattr(db, "security_groups", []):
            # Security groups may be plain ID strings or objects with an .id
            sg_id = sg if isinstance(sg, str) else getattr(sg, "id", str(sg))
            edges.append(
                ResourceEdge(
                    source_id=db.arn,
                    target_id=sg_id,
                    edge_type="network",
                    label="sg",
                )
            )

        vpc_id = getattr(db, "vpc_id", None)
        if vpc_id:
            edges.append(
                ResourceEdge(
                    source_id=db.arn,
                    target_id=vpc_id,
                    edge_type="network",
                    label="in-vpc",
                )
            )

        cluster_arn = getattr(db, "cluster_arn", None)
        if cluster_arn:
            edges.append(
                ResourceEdge(
                    source_id=db.arn,
                    target_id=cluster_arn,
                    edge_type="depends_on",
                    label="cluster-member",
                )
            )

        kms_key_id = getattr(db, "kms_key_id", None)
        if kms_key_id:
            edges.append(
                ResourceEdge(
                    source_id=kms_key_id,
                    target_id=db.arn,
                    edge_type="encrypts",
                    label="kms",
                )
            )

    return nodes, edges
@@ -0,0 +1,92 @@
from typing import List, Tuple

from lib.models import ResourceEdge, ResourceNode


def extract(client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """
    Extract S3 bucket nodes and their edges.

    Edges produced:
      - bucket → replication-target bucket  [replicates_to]
      - KMS key → bucket                    [encrypts]
      - bucket → logging bucket             [logs_to]
    """
    nodes: List[ResourceNode] = []
    edges: List[ResourceEdge] = []

    for bucket in client.buckets.values():
        encryption = getattr(bucket, "encryption", None)
        versioning = getattr(bucket, "versioning_enabled", None)
        logging = getattr(bucket, "logging", None)

        props = {}
        if versioning is not None:
            props["versioning"] = versioning
        if encryption:
            enc_type = getattr(encryption, "type", str(encryption))
            props["encryption"] = enc_type

        nodes.append(
            ResourceNode(
                id=bucket.arn,
                type="s3_bucket",
                name=bucket.name,
                service="s3",
                region=bucket.region,
                account_id=client.audited_account,
                properties=props,
            )
        )

        # Replication edges
        for rule in getattr(bucket, "replication_rules", None) or []:
            dest_bucket = getattr(rule, "destination_bucket", None)
            if dest_bucket:
                dest_arn = (
                    dest_bucket
                    if dest_bucket.startswith("arn:")
                    else f"arn:aws:s3:::{dest_bucket}"
                )
                edges.append(
                    ResourceEdge(
                        source_id=bucket.arn,
                        target_id=dest_arn,
                        edge_type="replicates_to",
                        label="s3-replication",
                    )
                )

        # Logging edges
        if logging:
            target_bucket = getattr(logging, "target_bucket", None)
            if target_bucket:
                target_arn = (
                    target_bucket
                    if target_bucket.startswith("arn:")
                    else f"arn:aws:s3:::{target_bucket}"
                )
                edges.append(
                    ResourceEdge(
                        source_id=bucket.arn,
                        target_id=target_arn,
                        edge_type="logs_to",
                        label="access-logs",
                    )
                )

        # KMS encryption edges
        if encryption:
            kms_arn = getattr(encryption, "kms_master_key_id", None)
            if kms_arn:
                edges.append(
                    ResourceEdge(
                        source_id=kms_arn,
                        target_id=bucket.arn,
                        edge_type="encrypts",
                        label="kms",
                    )
                )

    return nodes, edges
@@ -0,0 +1,92 @@
from typing import List, Tuple

from lib.models import ResourceEdge, ResourceNode


def extract(client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """
    Extract VPC and subnet nodes with their edges.

    Edges produced:
      - subnet → VPC                     [depends_on]
      - peering connection between VPCs  [network]
    """
    nodes: List[ResourceNode] = []
    edges: List[ResourceEdge] = []

    # VPCs
    for vpc in client.vpcs.values():
        name = vpc.id if hasattr(vpc, "id") else vpc.arn
        for tag in vpc.tags or []:
            if isinstance(tag, dict) and tag.get("Key") == "Name":
                name = tag["Value"]
                break

        nodes.append(
            ResourceNode(
                id=vpc.arn,
                type="vpc",
                name=name,
                service="vpc",
                region=vpc.region,
                account_id=client.audited_account,
                properties={
                    "cidr_block": getattr(vpc, "cidr_block", None),
                    "is_default": getattr(vpc, "is_default", None),
                },
            )
        )

    # VPC Subnets
    for subnet in client.vpc_subnets.values():
        name = subnet.id if hasattr(subnet, "id") else subnet.arn
        for tag in getattr(subnet, "tags", None) or []:
            if isinstance(tag, dict) and tag.get("Key") == "Name":
                name = tag["Value"]
                break

        nodes.append(
            ResourceNode(
                id=subnet.arn,
                type="subnet",
                name=name,
                service="vpc",
                region=subnet.region,
                account_id=client.audited_account,
                properties={
                    "vpc_id": getattr(subnet, "vpc_id", None),
                    "cidr_block": getattr(subnet, "cidr_block", None),
                    "availability_zone": getattr(subnet, "availability_zone", None),
                    "public": getattr(subnet, "public", None),
                },
            )
        )

        vpc_id = getattr(subnet, "vpc_id", None)
        if vpc_id:
            # Find the VPC ARN for this vpc_id; fall back to the raw ID
            vpc_arn = next(
                (v.arn for v in client.vpcs.values() if v.id == vpc_id),
                vpc_id,
            )
            edges.append(
                ResourceEdge(
                    source_id=subnet.arn,
                    target_id=vpc_arn,
                    edge_type="depends_on",
                    label="subnet-of",
                )
            )

    # VPC Peering Connections
    for peering in getattr(client, "vpc_peering_connections", {}).values():
        edges.append(
            ResourceEdge(
                source_id=peering.arn,
                target_id=getattr(peering, "accepter_vpc_id", peering.arn),
                edge_type="network",
                label="vpc-peer",
            )
        )

    return nodes, edges
@@ -0,0 +1,106 @@
"""
graph_builder.py
----------------
Builds a ConnectivityGraph by reading already-loaded AWS service clients from
sys.modules. Only services that were actually scanned (i.e. whose client
module is already imported) contribute nodes and edges. Unknown / unloaded
services are silently skipped, so the output degrades gracefully when only a
subset of checks has been run.
"""

import sys
from typing import Tuple

from prowler.lib.logger import logger
from lib.models import ConnectivityGraph

# Registry: (sys.modules key, attribute name inside that module, extractor module path)
_SERVICE_REGISTRY: Tuple[Tuple[str, str, str], ...] = (
    (
        "prowler.providers.aws.services.awslambda.awslambda_client",
        "awslambda_client",
        "lib.extractors.lambda_extractor",
    ),
    (
        "prowler.providers.aws.services.ec2.ec2_client",
        "ec2_client",
        "lib.extractors.ec2_extractor",
    ),
    (
        "prowler.providers.aws.services.vpc.vpc_client",
        "vpc_client",
        "lib.extractors.vpc_extractor",
    ),
    (
        "prowler.providers.aws.services.rds.rds_client",
        "rds_client",
        "lib.extractors.rds_extractor",
    ),
    (
        "prowler.providers.aws.services.elbv2.elbv2_client",
        "elbv2_client",
        "lib.extractors.elbv2_extractor",
    ),
    (
        "prowler.providers.aws.services.s3.s3_client",
        "s3_client",
        "lib.extractors.s3_extractor",
    ),
    (
        "prowler.providers.aws.services.iam.iam_client",
        "iam_client",
        "lib.extractors.iam_extractor",
    ),
)


def build_graph() -> ConnectivityGraph:
    """
    Iterate over every registered service, check whether its client module is
    already loaded, and call the corresponding extractor.

    Returns a ConnectivityGraph with all discovered nodes and edges.
    Duplicate node IDs are silently deduplicated (first occurrence wins).
    """
    graph = ConnectivityGraph()
    seen_node_ids: set = set()

    for client_module_key, client_attr, extractor_module_key in _SERVICE_REGISTRY:
        client_module = sys.modules.get(client_module_key)
        if client_module is None:
            continue

        service_client = getattr(client_module, client_attr, None)
        if service_client is None:
            continue

        extractor_module = sys.modules.get(extractor_module_key)
        if extractor_module is None:
            try:
                import importlib

                extractor_module = importlib.import_module(extractor_module_key)
            except ImportError as e:
                logger.debug(
                    f"inventory graph_builder: cannot import extractor {extractor_module_key}: {e}"
                )
                continue

        try:
            nodes, edges = extractor_module.extract(service_client)
        except Exception as e:
            logger.error(
                f"inventory graph_builder: extractor {extractor_module_key} failed: "
                f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}]: {e}"
            )
            continue

        for node in nodes:
            if node.id not in seen_node_ids:
                graph.add_node(node)
                seen_node_ids.add(node.id)

        for edge in edges:
            graph.add_edge(edge)

    return graph
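Each `_SERVICE_REGISTRY` entry assumes an extractor module exposing a single `extract(service_client)` function that returns a `(nodes, edges)` tuple. A minimal sketch of that contract under stated assumptions; the attribute names on the service client (`instances`, `audited_account`) are illustrative, not guaranteed Prowler API:

```python
# Illustrative sketch of the extractor contract assumed by _SERVICE_REGISTRY;
# attribute names on the service client are examples, not guaranteed API.
from typing import List, Tuple

from lib.models import ResourceEdge, ResourceNode


def extract(service_client) -> Tuple[List[ResourceNode], List[ResourceEdge]]:
    """Map one already-populated Prowler service client to graph elements."""
    nodes: List[ResourceNode] = []
    edges: List[ResourceEdge] = []
    # A typical extractor walks the client's in-memory inventory.
    for instance in getattr(service_client, "instances", []):
        nodes.append(
            ResourceNode(
                id=instance.arn,
                type="ec2_instance",
                name=instance.id,
                service="ec2",
                region=instance.region,
                account_id=getattr(service_client, "audited_account", ""),
                properties={"state": getattr(instance, "state", None)},
            )
        )
    return nodes, edges
```

Because `build_graph` only imports an extractor when its service client module is already present in `sys.modules`, an unscanned service simply contributes nothing.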
@@ -0,0 +1,502 @@
"""
inventory_output.py
-------------------
Writes the ConnectivityGraph produced by graph_builder to two files:

    <output_path>.inventory.json – machine-readable graph (nodes + edges)
    <output_path>.inventory.html – interactive D3.js force-directed graph
"""

import json
import os
from dataclasses import asdict
from datetime import datetime

from prowler.lib.logger import logger
from lib.models import ConnectivityGraph


# ---------------------------------------------------------------------------
# JSON output
# ---------------------------------------------------------------------------


def write_json(graph: ConnectivityGraph, file_path: str) -> None:
    """Serialise the graph to a JSON file."""
    try:
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        data = {
            "generated_at": datetime.utcnow().isoformat() + "Z",
            "nodes": [asdict(n) for n in graph.nodes],
            "edges": [asdict(e) for e in graph.edges],
            "stats": {
                "node_count": len(graph.nodes),
                "edge_count": len(graph.edges),
            },
        }
        with open(file_path, "w", encoding="utf-8") as fh:
            json.dump(data, fh, indent=2, default=str)
        logger.info(f"Inventory graph JSON written to {file_path}")
    except Exception as e:
        logger.error(
            f"inventory_output.write_json: {e.__class__.__name__}[{e.__traceback__.tb_lineno}]: {e}"
        )


# ---------------------------------------------------------------------------
# HTML output (self-contained, D3.js CDN)
# ---------------------------------------------------------------------------

# Colour palette per node type
_NODE_COLOURS = {
    "lambda_function": "#f59e0b",
    "ec2_instance": "#3b82f6",
    "security_group": "#6366f1",
    "vpc": "#10b981",
    "subnet": "#34d399",
    "rds_instance": "#ef4444",
    "load_balancer": "#8b5cf6",
    "s3_bucket": "#06b6d4",
    "iam_role": "#f97316",
    "default": "#94a3b8",
}

# Edge stroke colours per edge type
_EDGE_COLOURS = {
    "network": "#64748b",
    "iam": "#f97316",
    "triggers": "#a855f7",
    "data_flow": "#0ea5e9",
    "depends_on": "#94a3b8",
    "routes_to": "#22c55e",
    "replicates_to": "#ec4899",
    "encrypts": "#eab308",
    "logs_to": "#78716c",
}

_HTML_TEMPLATE = """\
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8"/>
  <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
  <title>Prowler – AWS Connectivity Graph</title>
  <script src="https://d3js.org/d3.v7.min.js"></script>
  <style>
    *, *::before, *::after {{ box-sizing: border-box; }}
    body {{
      margin: 0;
      font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
      background: #0f172a;
      color: #e2e8f0;
    }}
    #header {{
      padding: 12px 20px;
      background: #1e293b;
      border-bottom: 1px solid #334155;
      display: flex;
      align-items: center;
      gap: 16px;
    }}
    #header h1 {{ margin: 0; font-size: 18px; font-weight: 700; }}
    #header .stats {{ font-size: 13px; color: #94a3b8; }}
    #controls {{
      padding: 8px 20px;
      background: #1e293b;
      border-bottom: 1px solid #334155;
      display: flex;
      gap: 12px;
      align-items: center;
      flex-wrap: wrap;
    }}
    #controls label {{ font-size: 12px; color: #94a3b8; }}
    #controls select, #controls input[type=range] {{
      background: #0f172a;
      color: #e2e8f0;
      border: 1px solid #334155;
      border-radius: 4px;
      padding: 3px 6px;
      font-size: 12px;
    }}
    #graph-container {{ width: 100%; height: calc(100vh - 100px); position: relative; }}
    svg {{ width: 100%; height: 100%; }}
    .node circle {{
      stroke: #1e293b;
      stroke-width: 1.5px;
      cursor: pointer;
      transition: r 0.15s;
    }}
    .node circle:hover {{ stroke-width: 3px; }}
    .node text {{
      font-size: 10px;
      fill: #e2e8f0;
      pointer-events: none;
      text-shadow: 0 0 4px #0f172a;
    }}
    .link {{
      stroke-opacity: 0.6;
      stroke-width: 1.5px;
    }}
    .link-label {{
      font-size: 8px;
      fill: #94a3b8;
      pointer-events: none;
    }}
    #tooltip {{
      position: fixed;
      background: #1e293b;
      border: 1px solid #334155;
      border-radius: 6px;
      padding: 10px 14px;
      font-size: 12px;
      pointer-events: none;
      max-width: 320px;
      word-break: break-all;
      z-index: 9999;
      display: none;
    }}
    #tooltip strong {{ color: #f8fafc; }}
    #tooltip .prop {{ color: #94a3b8; margin-top: 4px; }}
    #legend {{
      position: absolute;
      top: 10px;
      right: 10px;
      background: rgba(30,41,59,0.9);
      border: 1px solid #334155;
      border-radius: 6px;
      padding: 10px 14px;
      font-size: 11px;
    }}
    #legend h3 {{ margin: 0 0 6px; font-size: 12px; }}
    .legend-row {{ display: flex; align-items: center; gap: 6px; margin: 3px 0; }}
    .legend-dot {{ width: 12px; height: 12px; border-radius: 50%; flex-shrink: 0; }}
    .legend-line {{ width: 20px; height: 2px; flex-shrink: 0; }}
  </style>
</head>
<body>
  <div id="header">
    <h1>🔗 AWS Connectivity Graph</h1>
    <span class="stats" id="stat-label">Generated: {generated_at}</span>
  </div>
  <div id="controls">
    <label>Filter service:
      <select id="filter-service">
        <option value="">All services</option>
      </select>
    </label>
    <label>Link distance:
      <input type="range" id="link-distance" min="40" max="300" value="120"/>
    </label>
    <label>Charge strength:
      <input type="range" id="charge-strength" min="-800" max="-20" value="-250"/>
    </label>
    <span class="stats" id="visible-count"></span>
  </div>
  <div id="graph-container">
    <svg id="graph-svg"></svg>
    <div id="tooltip"></div>
    <div id="legend">
      <h3>Node types</h3>
      {legend_nodes_html}
      <h3 style="margin-top:8px">Edge types</h3>
      {legend_edges_html}
    </div>
  </div>

  <script>
    const RAW_NODES = {nodes_json};
    const RAW_EDGES = {edges_json};
    const NODE_COLOURS = {node_colours_json};
    const EDGE_COLOURS = {edge_colours_json};

    // ── helpers ──────────────────────────────────────────────────────────────
    function nodeColour(d) {{
      return NODE_COLOURS[d.type] || NODE_COLOURS["default"];
    }}
    function edgeColour(d) {{
      return EDGE_COLOURS[d.edge_type] || "#94a3b8";
    }}
    function nodeRadius(d) {{
      const base = {{
        lambda_function: 9, ec2_instance: 10, vpc: 14, subnet: 8,
        security_group: 7, rds_instance: 11, load_balancer: 12,
        s3_bucket: 9, iam_role: 9
      }};
      return base[d.type] || 8;
    }}

    // ── filter controls ───────────────────────────────────────────────────────
    const services = [...new Set(RAW_NODES.map(n => n.service))].sort();
    const sel = document.getElementById("filter-service");
    services.forEach(s => {{
      const o = document.createElement("option");
      o.value = s; o.textContent = s;
      sel.appendChild(o);
    }});

    // ── D3 setup ──────────────────────────────────────────────────────────────
    const svg = d3.select("#graph-svg");
    const container = svg.append("g");

    // zoom
    svg.call(
      d3.zoom().scaleExtent([0.05, 8])
        .on("zoom", e => container.attr("transform", e.transform))
    );

    // arrowhead marker
    const defs = svg.append("defs");
    defs.append("marker")
      .attr("id", "arrow")
      .attr("viewBox", "0 -5 10 10")
      .attr("refX", 20).attr("refY", 0)
      .attr("markerWidth", 6).attr("markerHeight", 6)
      .attr("orient", "auto")
      .append("path")
      .attr("d", "M0,-5L10,0L0,5")
      .attr("fill", "#94a3b8");

    // tooltip
    const tooltip = document.getElementById("tooltip");

    // ── simulation ────────────────────────────────────────────────────────────
    let simulation, linkSel, nodeSel, labelSel;

    function buildGraph(nodeFilter) {{
      // Determine which nodes to show
      const visibleNodes = nodeFilter
        ? RAW_NODES.filter(n => n.service === nodeFilter)
        : RAW_NODES;
      const visibleIds = new Set(visibleNodes.map(n => n.id));

      // Only show edges where BOTH endpoints are visible
      const visibleEdges = RAW_EDGES.filter(
        e => visibleIds.has(e.source_id) && visibleIds.has(e.target_id)
      );

      document.getElementById("visible-count").textContent =
        `Showing ${{visibleNodes.length}} nodes · ${{visibleEdges.length}} edges`;

      container.selectAll("*").remove();

      if (simulation) simulation.stop();

      const nodes = visibleNodes.map(n => ({{ ...n }}));
      const nodeIndex = Object.fromEntries(nodes.map(n => [n.id, n]));

      const links = visibleEdges.map(e => ({{
        ...e,
        source: nodeIndex[e.source_id] || e.source_id,
        target: nodeIndex[e.target_id] || e.target_id,
      }}));

      const dist = +document.getElementById("link-distance").value;
      const charge = +document.getElementById("charge-strength").value;

      simulation = d3.forceSimulation(nodes)
        .force("link", d3.forceLink(links).id(d => d.id).distance(dist))
        .force("charge", d3.forceManyBody().strength(charge))
        .force("center", d3.forceCenter(
          document.getElementById("graph-container").clientWidth / 2,
          document.getElementById("graph-container").clientHeight / 2
        ))
        .force("collision", d3.forceCollide().radius(d => nodeRadius(d) + 6));

      // Edges
      linkSel = container.append("g").attr("class", "links")
        .selectAll("line")
        .data(links)
        .join("line")
        .attr("class", "link")
        .attr("stroke", edgeColour)
        .attr("marker-end", "url(#arrow)");

      // Edge labels
      labelSel = container.append("g").attr("class", "link-labels")
        .selectAll("text")
        .data(links)
        .join("text")
        .attr("class", "link-label")
        .text(d => d.label || "");

      // Nodes
      nodeSel = container.append("g").attr("class", "nodes")
        .selectAll("g")
        .data(nodes)
        .join("g")
        .attr("class", "node")
        .call(
          d3.drag()
            .on("start", (event, d) => {{
              if (!event.active) simulation.alphaTarget(0.3).restart();
              d.fx = d.x; d.fy = d.y;
            }})
            .on("drag", (event, d) => {{ d.fx = event.x; d.fy = event.y; }})
            .on("end", (event, d) => {{
              if (!event.active) simulation.alphaTarget(0);
              d.fx = null; d.fy = null;
            }})
        )
        .on("mouseover", (event, d) => {{
          const props = Object.entries(d.properties || {{}})
            .map(([k, v]) => `<div class="prop"><b>${{k}}</b>: ${{v}}</div>`)
            .join("");
          tooltip.innerHTML = `
            <strong>${{d.name}}</strong>
            <div class="prop"><b>type</b>: ${{d.type}}</div>
            <div class="prop"><b>service</b>: ${{d.service}}</div>
            <div class="prop"><b>region</b>: ${{d.region}}</div>
            <div class="prop"><b>account</b>: ${{d.account_id}}</div>
            <div class="prop" style="word-break:break-all"><b>arn</b>: ${{d.id}}</div>
            ${{props}}
          `;
          tooltip.style.display = "block";
          tooltip.style.left = (event.clientX + 12) + "px";
          tooltip.style.top = (event.clientY - 10) + "px";
        }})
        .on("mousemove", event => {{
          tooltip.style.left = (event.clientX + 12) + "px";
          tooltip.style.top = (event.clientY - 10) + "px";
        }})
        .on("mouseout", () => {{ tooltip.style.display = "none"; }});

      nodeSel.append("circle")
        .attr("r", nodeRadius)
        .attr("fill", nodeColour);

      nodeSel.append("text")
        .attr("dx", d => nodeRadius(d) + 3)
        .attr("dy", "0.35em")
        .text(d => d.name.length > 24 ? d.name.slice(0, 22) + "…" : d.name);

      simulation.on("tick", () => {{
        linkSel
          .attr("x1", d => d.source.x)
          .attr("y1", d => d.source.y)
          .attr("x2", d => d.target.x)
          .attr("y2", d => d.target.y);

        labelSel
          .attr("x", d => (d.source.x + d.target.x) / 2)
          .attr("y", d => (d.source.y + d.target.y) / 2);

        nodeSel.attr("transform", d => `translate(${{d.x}},${{d.y}})`);
      }});
    }}

    // Initial render
    buildGraph(null);

    // Filter change
    sel.addEventListener("change", () => buildGraph(sel.value || null));

    // Simulation control sliders — restart on change
    document.getElementById("link-distance").addEventListener("input", () => buildGraph(sel.value || null));
    document.getElementById("charge-strength").addEventListener("input", () => buildGraph(sel.value || null));
  </script>
</body>
</html>
"""


def _build_legend_html(colours: dict, shape: str) -> str:
    rows = []
    for key, colour in sorted(colours.items()):
        if shape == "dot":
            rows.append(
                f'<div class="legend-row">'
                f'<div class="legend-dot" style="background:{colour}"></div>'
                f"<span>{key}</span></div>"
            )
        else:
            rows.append(
                f'<div class="legend-row">'
                f'<div class="legend-line" style="background:{colour}"></div>'
                f"<span>{key}</span></div>"
            )
    return "\n".join(rows)


def write_html(graph: ConnectivityGraph, file_path: str) -> None:
    """Render the graph as a self-contained interactive HTML page."""
    try:
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        nodes_json = json.dumps(
            [
                {
                    "id": n.id,
                    "type": n.type,
                    "name": n.name,
                    "service": n.service,
                    "region": n.region,
                    "account_id": n.account_id,
                    "properties": n.properties,
                }
                for n in graph.nodes
            ],
            indent=None,
            default=str,
        )
        edges_json = json.dumps(
            [
                {
                    "source_id": e.source_id,
                    "target_id": e.target_id,
                    "edge_type": e.edge_type,
                    "label": e.label or "",
                }
                for e in graph.edges
            ],
            indent=None,
            default=str,
        )

        html = _HTML_TEMPLATE.format(
            generated_at=datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC"),
            nodes_json=nodes_json,
            edges_json=edges_json,
            node_colours_json=json.dumps(_NODE_COLOURS),
            edge_colours_json=json.dumps(_EDGE_COLOURS),
            legend_nodes_html=_build_legend_html(_NODE_COLOURS, "dot"),
            legend_edges_html=_build_legend_html(_EDGE_COLOURS, "line"),
        )

        with open(file_path, "w", encoding="utf-8") as fh:
            fh.write(html)

        logger.info(f"Inventory graph HTML written to {file_path}")
    except Exception as e:
        logger.error(
            f"inventory_output.write_html: {e.__class__.__name__}[{e.__traceback__.tb_lineno}]: {e}"
        )


# ---------------------------------------------------------------------------
# Convenience entry-point called from __main__.py
# ---------------------------------------------------------------------------


def generate_inventory_outputs(output_path: str) -> None:
    """
    Build the connectivity graph from currently-loaded service clients and write
    both JSON and HTML outputs.

    Args:
        output_path: base file path WITHOUT extension, e.g.
            "output/prowler-output-20240101120000".
            The function appends .inventory.json and .inventory.html.
    """
    from lib.graph_builder import build_graph

    graph = build_graph()

    if not graph.nodes:
        logger.warning(
            "Inventory graph: no nodes discovered. "
            "Make sure at least one AWS service was scanned before generating the inventory."
        )

    write_json(graph, f"{output_path}.inventory.json")
    write_html(graph, f"{output_path}.inventory.html")
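A minimal usage sketch, assuming the module is importable as `lib.inventory_output` and that an AWS scan has already populated the service clients in the same process:

```python
# Illustrative only — run after a Prowler AWS scan in this process,
# so the service client modules are already present in sys.modules.
from lib.inventory_output import generate_inventory_outputs

# Writes output/prowler-inventory.inventory.json and
# output/prowler-inventory.inventory.html next to the other scan outputs.
generate_inventory_outputs("output/prowler-inventory")
```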
@@ -0,0 +1,71 @@
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional


@dataclass
class ResourceNode:
    """
    Represents a single AWS resource as a node in the connectivity graph.

    id        : globally unique identifier — always the resource ARN
    type      : coarse resource type used for grouping/colour, e.g. "lambda_function"
    name      : human-readable label shown on the graph
    service   : AWS service name, e.g. "lambda", "ec2", "rds"
    region    : AWS region the resource lives in
    account_id: AWS account ID
    properties: additional resource-specific metadata (runtime, vpc_id, etc.)
    """

    id: str
    type: str
    name: str
    service: str
    region: str
    account_id: str
    properties: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ResourceEdge:
    """
    Represents a directional relationship between two resource nodes.

    source_id : ARN of the source node
    target_id : ARN of the target node
    edge_type : semantic type of the relationship, e.g.:
        "network"    – resources share a network path (VPC/subnet/SG)
        "iam"        – IAM trust or permission relationship
        "triggers"   – one resource can invoke another (event source → Lambda)
        "data_flow"  – data is written/read (Lambda → SQS dead-letter queue)
        "depends_on" – soft dependency (Lambda layer, subnet belongs to VPC)
        "routes_to"  – traffic routing (LB → target)
        "encrypts"   – KMS key encrypts the resource
    label     : optional short label rendered on the edge in the HTML graph
    """

    source_id: str
    target_id: str
    edge_type: str
    label: Optional[str] = None


@dataclass
class ConnectivityGraph:
    """
    Container for the full inventory connectivity graph.

    nodes: all discovered resource nodes
    edges: all discovered edges between nodes
    """

    nodes: List[ResourceNode] = field(default_factory=list)
    edges: List[ResourceEdge] = field(default_factory=list)

    def add_node(self, node: ResourceNode) -> None:
        self.nodes.append(node)

    def add_edge(self, edge: ResourceEdge) -> None:
        self.edges.append(edge)

    def node_ids(self) -> set:
        return {n.id for n in self.nodes}
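A small self-contained sketch of how the three dataclasses compose (the ARNs and the `sqs_queue` type are made up for illustration; unknown types simply fall back to the default colour in the HTML view):

```python
from lib.models import ConnectivityGraph, ResourceEdge, ResourceNode

graph = ConnectivityGraph()
fn = ResourceNode(
    id="arn:aws:lambda:eu-west-1:123456789012:function:api",
    type="lambda_function",
    name="api",
    service="lambda",
    region="eu-west-1",
    account_id="123456789012",
    properties={"runtime": "python3.12"},
)
dlq = ResourceNode(
    id="arn:aws:sqs:eu-west-1:123456789012:api-dlq",
    type="sqs_queue",
    name="api-dlq",
    service="sqs",
    region="eu-west-1",
    account_id="123456789012",
)
graph.add_node(fn)
graph.add_node(dlq)
# The Lambda function writes failed events to its dead-letter queue.
graph.add_edge(ResourceEdge(fn.id, dlq.id, edge_type="data_flow", label="dlq"))

assert graph.node_ids() == {fn.id, dlq.id}
```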
@@ -10,10 +10,10 @@ This repository contains the Prowler Open Source documentation powered by [Mintl

## Local Development

Install the [Mintlify CLI](https://www.npmjs.com/package/mint) to preview documentation changes locally:
Install a reviewed version of the [Mintlify CLI](https://www.npmjs.com/package/mint) to preview documentation changes locally:

```bash
npm i -g mint
npm install --global mint@4.2.560
```

Run the following command at the root of your documentation (where `mint.json` is located):

@@ -28,7 +28,7 @@ This includes the [AGENTS.md](https://github.com/prowler-cloud/prowler/blob/mast

<Steps>
  <Step title="Install Mintlify CLI">
    ```bash
    npm i -g mint
    npm install --global mint@4.2.560
    ```
    For detailed instructions, check the [Mintlify documentation](https://www.mintlify.com/docs/installation).
  </Step>
@@ -326,12 +326,25 @@
            "user-guide/providers/openstack/authentication"
          ]
        },
        {
          "group": "Scaleway",
          "pages": [
            "user-guide/providers/scaleway/getting-started-scaleway"
          ]
        },
        {
          "group": "Vercel",
          "pages": [
            "user-guide/providers/vercel/getting-started-vercel",
            "user-guide/providers/vercel/authentication"
          ]
        },
        {
          "group": "Okta",
          "pages": [
            "user-guide/providers/okta/getting-started-okta",
            "user-guide/providers/okta/authentication"
          ]
        }
      ]
    },
@@ -10,7 +10,7 @@ Complete reference guide for all tools available in the Prowler MCP Server. Tool

|----------|------------|------------------------|
| Prowler Hub | 10 tools | No |
| Prowler Documentation | 2 tools | No |
| Prowler Cloud/App | 29 tools | Yes |
| Prowler Cloud/App | 32 tools | Yes |

## Tool Naming Convention

@@ -36,6 +36,14 @@ Tools for searching, viewing, and analyzing security findings across all cloud p

- **`prowler_app_get_finding_details`** - Get comprehensive details about a specific finding including remediation guidance, check metadata, and resource relationships
- **`prowler_app_get_findings_overview`** - Get aggregate statistics and trends about security findings as a markdown report

### Finding Groups Management

Tools for listing finding groups aggregated by check ID, viewing complete group counters, and drilling down into affected resources.

- **`prowler_app_list_finding_groups`** - List latest or historical finding groups with filters for provider, region, service, resource, category, check, severity, status, muted state, delta, date range, and sorting
- **`prowler_app_get_finding_group_details`** - Get complete details for a specific finding group including counters, description, timestamps, and impacted providers
- **`prowler_app_list_finding_group_resources`** - List actionable unmuted resources affected by a finding group by default, including nested resource and provider data plus the `finding_id` for remediation details. Set `include_muted` to include suppressed resources

### Provider Management

Tools for managing cloud provider connections in Prowler.
@@ -44,13 +44,21 @@ Choose the configuration based on your deployment:

<Tab title="Generic without Native HTTP Support">
  **Configuration:**
  <Warning>
  Avoid configuring MCP clients to run `npx mcp-remote` directly. `npx` can download and execute a new package version on each run. Install a reviewed version of `mcp-remote` in a dedicated local workspace, then point the MCP client to the installed binary.
  </Warning>
  ```bash
  mkdir -p ~/.local/share/prowler-mcp-bridge
  cd ~/.local/share/prowler-mcp-bridge
  npm init -y
  npm install --save-exact mcp-remote@0.1.38
  ```
  ```json
  {
    "mcpServers": {
      "prowler": {
        "command": "npx",
        "command": "/absolute/path/to/.local/share/prowler-mcp-bridge/node_modules/.bin/mcp-remote",
        "args": [
          "mcp-remote",
          "https://mcp.prowler.com/mcp", // or your self-hosted Prowler MCP Server URL
          "--header",
          "Authorization: Bearer ${PROWLER_APP_API_KEY}"

@@ -72,14 +80,20 @@ Choose the configuration based on your deployment:

2. Go to "Developer" tab
3. Click the "Edit Config" button
4. Edit the `claude_desktop_config.json` file with your favorite editor
5. Add the following configuration:
5. Install a reviewed version of `mcp-remote` in a dedicated local workspace:
   ```bash
   mkdir -p ~/.local/share/prowler-mcp-bridge
   cd ~/.local/share/prowler-mcp-bridge
   npm init -y
   npm install --save-exact mcp-remote@0.1.38
   ```
6. Add the following configuration:
   ```json
   {
     "mcpServers": {
       "prowler": {
         "command": "npx",
         "command": "/absolute/path/to/.local/share/prowler-mcp-bridge/node_modules/.bin/mcp-remote",
         "args": [
           "mcp-remote",
           "https://mcp.prowler.com/mcp",
           "--header",
           "Authorization: Bearer ${PROWLER_APP_API_KEY}"
@@ -38,7 +38,7 @@ Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detai

- `git` installed.
- `poetry` installed: [poetry installation](https://python-poetry.org/docs/#installation).
- `npm` installed: [npm installation](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm).
- `pnpm` installed through [Corepack](https://pnpm.io/installation#using-corepack) or the standalone [pnpm installation](https://pnpm.io/installation).
- `Docker Compose` installed: https://docs.docker.com/compose/install/.

<Warning>

@@ -97,9 +97,11 @@ Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detai

```bash
git clone https://github.com/prowler-cloud/prowler \
cd prowler/ui \
npm install \
npm run build \
npm start
corepack enable \
corepack install \
pnpm install --frozen-lockfile \
pnpm run build \
pnpm start
```

> Enjoy Prowler App at http://localhost:3000 by signing up with your email and password.
@@ -35,6 +35,7 @@ Prowler supports a wide range of providers organized by category:

| **NHN** | Unofficial | Tenants | CLI |
| [OpenStack](/user-guide/providers/openstack/getting-started-openstack) | Official | Projects | UI, API, CLI |
| [Oracle Cloud](/user-guide/providers/oci/getting-started-oci) | Official | Tenancies / Compartments | UI, API, CLI |
| [Scaleway](/user-guide/providers/scaleway/getting-started-scaleway) [Contact us](https://prowler.com/contact) | Unofficial | Organizations | CLI |

### Infrastructure as Code Providers

@@ -47,11 +48,12 @@ Prowler supports a wide range of providers organized by category:

| Provider | Support | Audit Scope/Entities | Interface |
| ----------------------------------------------------------------------------------------- | -------- | ---------------------------- | ------------ |
| [GitHub](/user-guide/providers/github/getting-started-github) | Official | Organizations / Repositories | UI, API, CLI |
| [Google Workspace](/user-guide/providers/googleworkspace/getting-started-googleworkspace) | Official | Domains | CLI |
| [Google Workspace](/user-guide/providers/googleworkspace/getting-started-googleworkspace) | Official | Domains | UI, API, CLI |
| [LLM](/user-guide/providers/llm/getting-started-llm) | Official | Models | CLI |
| [M365](/user-guide/providers/microsoft365/getting-started-m365) | Official | Tenants | UI, API, CLI |
| [MongoDB Atlas](/user-guide/providers/mongodbatlas/getting-started-mongodbatlas) | Official | Organizations | UI, API, CLI |
| [Vercel](/user-guide/providers/vercel/getting-started-vercel) | Official | Teams / Projects | CLI |
| [Okta](/user-guide/providers/okta/getting-started-okta) | Official | Organizations | CLI |
| [Vercel](/user-guide/providers/vercel/getting-started-vercel) | Official | Teams / Projects | UI, API, CLI |

### Kubernetes

@@ -158,6 +158,15 @@ The following list includes all the Vercel checks with configurable variables th

| `team_member_role_least_privilege` | `max_owners` | Integer |
| `team_no_stale_invitations` | `stale_invitation_threshold_days` | Integer |

## Okta

### Configurable Checks
The following list includes all the Okta checks with configurable variables that can be changed in the configuration YAML file:

| Check Name | Value | Type |
|---------------------------------------------------------------|------------------------------------|---------|
| `signon_global_session_idle_timeout_15min` | `okta_max_session_idle_minutes` | Integer |

## Config YAML File Structure

<Note>
@@ -18,9 +18,11 @@ prowler <provider> --scan-unused-services

#### ACM (AWS Certificate Manager)

Certificates stored in ACM without active usage in AWS resources are excluded. By default, Prowler only scans actively used certificates. Unused certificates will not be checked if they are expired, if their expiring date is near or if they are good.
Certificates stored in ACM without active usage in AWS resources are excluded. By default, Prowler only scans actively used certificates. Unused certificates are not evaluated for expiration, transparency logging, or weak key algorithms.

- `acm_certificates_expiration_check`
- `acm_certificates_transparency_logs_enabled`
- `acm_certificates_with_secure_key_algorithms`

#### Athena

@@ -28,6 +30,13 @@ Upon AWS account creation, Athena provisions a default primary workgroup for the

- `athena_workgroup_encryption`
- `athena_workgroup_enforce_configuration`
- `athena_workgroup_logging_enabled`

#### Amazon Bedrock

Generative AI workloads benefit from private VPC endpoint connectivity to keep prompt and model traffic off the public internet. Prowler only evaluates this configuration for VPCs in use (with active ENIs).

- `bedrock_vpc_endpoints_configured`

#### AWS CloudTrail

@@ -38,15 +47,23 @@ AWS CloudTrail should have at least one trail with a data event to record all S3

#### AWS Elastic Compute Cloud (EC2)

If Amazon Elastic Block Store (EBS) default encyption is not enabled, sensitive data at rest will remain unprotected in EC2. However, Prowler will only generate a finding if EBS volumes exist where default encryption could be enforced.
If Amazon Elastic Block Store (EBS) default encryption is not enabled, sensitive data at rest remains unprotected in EC2. Prowler only generates a finding if EBS volumes exist where default encryption could be enforced.

- `ec2_ebs_default_encryption`

**EBS Snapshot Public Access**: Public EBS snapshots can leak data. Prowler only evaluates the account-level block setting if EBS snapshots exist in the account.

- `ec2_ebs_snapshot_account_block_public_access`

**EC2 Instance Metadata Service (IMDS)**: Enforcing IMDSv2 at the account level mitigates SSRF-based credential theft. Prowler only evaluates the account-level setting if EC2 instances exist in the account.

- `ec2_instance_account_imdsv2_enabled`

**Security Groups**: Misconfigured security groups increase the attack surface.

Prowler scans only attached security groups to report vulnerabilities in actively used configurations. Applies to:

- 15 security group-related checks, including open ports and ingress/egress traffic rules.
- 20 security group-related checks, including open ports and ingress/egress traffic rules.

- `ec2_securitygroup_allow_ingress_from_internet_to_port_X`
- `ec2_securitygroup_default_restrict_traffic`

@@ -56,6 +73,18 @@ Prowler scans only attached security groups to report vulnerabilities in activel

- `ec2_networkacl_allow_ingress_X_port`

#### AWS Identity and Access Management (IAM)

Customer-managed IAM policies that are not attached to any user, group, or role grant no effective permissions until a principal is bound to them. Prowler treats such policies as dormant by default and skips the content-evaluation checks below when `--scan-unused-services` is not set. Enable the flag to surface findings on unattached policies as well (see the example after this list).

- `iam_policy_allows_privilege_escalation`
- `iam_policy_no_full_access_to_cloudtrail`
- `iam_policy_no_full_access_to_kms`
- `iam_policy_no_wildcard_marketplace_subscribe`
- `iam_no_custom_policy_permissive_role_assumption`

The dedicated `iam_customer_unattached_policy_no_administrative_privileges` check still inspects unattached policies regardless of the flag, since its purpose is to highlight dormant administrator privileges.
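For instance, an illustrative invocation that surfaces findings on unattached policies for one of the checks above:

```bash
prowler aws --scan-unused-services --check iam_policy_allows_privilege_escalation
```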
#### AWS Glue

AWS Glue best practices recommend encrypting metadata and connection passwords in Data Catalogs.

@@ -71,6 +100,12 @@ Amazon Inspector is a vulnerability discovery service that automates continuous

- `inspector2_is_enabled`

#### AWS Key Management Service (KMS)

Customer managed Customer Master Keys (CMKs) in the `Disabled` state cannot be used for cryptographic operations, so Prowler skips the unintentional-deletion check on them by default. Enable the flag to evaluate disabled CMKs as well.

- `kms_cmk_not_deleted_unintentionally`

#### Amazon Macie

Amazon Macie leverages machine learning to automatically discover, classify, and protect sensitive data in S3 buckets. Prowler only generates findings if Macie is disabled and there are S3 buckets in the AWS account.

@@ -83,6 +118,15 @@ A network firewall is essential for monitoring and controlling traffic within a

- `networkfirewall_in_all_vpc`

#### Amazon Relational Database Service (RDS)

RDS event subscriptions notify operators of critical database events. Prowler only evaluates these subscription checks when RDS clusters or instances exist in the account.

- `rds_cluster_critical_event_subscription`
- `rds_instance_critical_event_subscription`
- `rds_instance_event_subscription_parameter_groups`
- `rds_instance_event_subscription_security_groups`

#### Amazon S3

To prevent unintended data exposure:

@@ -99,6 +143,10 @@ VPC settings directly impact network security and availability.

- `vpc_flow_logs_enabled`

- VPC Endpoint for EC2: Routes EC2 API calls through a private VPC endpoint to keep traffic off the public internet. Prowler only evaluates this configuration for VPCs in use, i.e., those with active ENIs.

  - `vpc_endpoint_for_ec2_enabled`

- VPC Subnet Public IP Restrictions: Prevent unintended exposure of resources to the internet. Prowler only checks this configuration for VPCs in use, i.e., those with active ENIs.

  - `vpc_subnet_no_public_ip_by_default`
@@ -22,7 +22,7 @@ Install promptfoo using one of the following methods:

**Using npm:**
```bash
npm install -g promptfoo
npm install --global promptfoo@0.121.11
```

**Using Homebrew (macOS):**
@@ -0,0 +1,186 @@
---
title: 'Okta Authentication in Prowler'
---

import { VersionBadge } from "/snippets/version-badge.mdx"

<VersionBadge version="5.27.0" />

Prowler authenticates to Okta as a **service application** using **OAuth 2.0 with a private-key JWT** (Client Credentials grant). The integration is read-only by scope and follows DISA STIG guidance for least-privilege access.

## Common Setup

### Prerequisites

- An Okta organization. The UI examples below use **Identity Engine** terminology such as **Global Session Policy**; Classic Engine exposes equivalent sign-on policy concepts under older naming.
- A **Super Administrator** account on that organization for the one-time service-app setup.
- An **API Services** app integration created in the Okta Admin Console.

### Authentication Method Overview

| Method | Status | Use Case |
|---|---|---|
| **OAuth 2.0 (private-key JWT)** | Supported | Production scans, CI/CD, Prowler App. |

The private-key JWT flow is the only supported authentication method in the initial release. The service application proves possession of a private key on every token request; Okta returns a short-lived access token, refreshed automatically by the SDK.
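For readers who want to see what that exchange looks like on the wire, here is a minimal sketch of the token request the SDK performs for you, assuming a PEM-encoded RSA key and the third-party `PyJWT` and `requests` packages (DPoP is omitted for brevity, and Prowler users never need to do this by hand):

```python
# Illustrative sketch of the OAuth 2.0 private-key JWT (client_credentials)
# exchange; the org domain, client ID, and key path are placeholders.
import time
import uuid

import jwt  # PyJWT
import requests

ORG_DOMAIN = "acme.okta.com"
CLIENT_ID = "0oa1234567890abcdef"
TOKEN_URL = f"https://{ORG_DOMAIN}/oauth2/v1/token"

with open("/secure/path/to/prowler-okta.pem") as fh:
    private_key = fh.read()

now = int(time.time())
# The client assertion is a JWT signed with the app's private key:
# issuer and subject are the client ID, audience is the token endpoint.
assertion = jwt.encode(
    {
        "iss": CLIENT_ID,
        "sub": CLIENT_ID,
        "aud": TOKEN_URL,
        "iat": now,
        "exp": now + 300,
        "jti": str(uuid.uuid4()),
    },
    private_key,
    algorithm="RS256",
)

response = requests.post(
    TOKEN_URL,
    data={
        "grant_type": "client_credentials",
        "scope": "okta.policies.read",
        "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
        "client_assertion": assertion,
    },
)
response.raise_for_status()
print(response.json()["access_token"][:16] + "...")  # short-lived bearer token
```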
<Note>
If a different authentication method is needed (SSWS API token, OAuth with user delegation, etc.), please open a [feature request](https://github.com/prowler-cloud/prowler/issues/new?template=feature-request.yml) describing the use case.
</Note>

### Required OAuth Scopes

For the initial check (`signon_global_session_idle_timeout_15min`) only one scope is required:

- `okta.policies.read`

Additional scopes will be needed as more services and checks are added; these are the ones currently required:

| Scope | Used by |
|---|---|
| `okta.policies.read` | Sign-on / password / authentication policies |

### Required Admin Role

The service application must be assigned the built-in **Read-Only Administrator** role.

Okta's Management API enforces a two-layer authorization model: an OAuth **scope** decides which API endpoints the token can call, and an **admin role** decides whether the call returns data. With only a scope granted, the token mint succeeds but every read returns `403 Forbidden`. The Read-Only Administrator role is the minimum that lets the granted `okta.*.read` scopes actually return configuration data to Prowler's checks — without it, the credential probe at provider startup fails and the scan never gets to evaluate any check.

Read-Only Administrator is intentionally the narrowest role that satisfies this requirement and aligns with the least-privilege guidance in DISA STIG.

## Step-by-Step Setup

### 1. Go to the admin console

![]()

### 2. [Optional] Disable the privilege-escalation bypass (org-wide, one-time)

In the Okta Admin Console, go to **Settings → Account → Public client app admins** and ensure it is **off**. When enabled, every API Services app can be auto-assigned the Super Administrator role after scopes are granted, which would invalidate the read-only premise of this integration.

![]()

### 3. Create the API Services app

1. Go to **Applications → Applications**.

![]()

2. **Create App Integration**

![]()

3. Sign-in method: **API Services**. Click **Next**.
4. Name the app (for example, `Prowler Scanner`) and click **Save**.
5. Copy the displayed **Client ID** — you'll use it as `OKTA_CLIENT_ID`.

![]()

### 4. Switch to private-key authentication and generate a keypair

On the new app's **General** tab, scroll to **Client Credentials**:

1. Click **Edit**.
2. Set **Client authentication** to **Public key / Private key**.
3. Under **Public Keys**, click **Add key**.
4. In the modal, click **Generate new key**. Okta creates a JWK pair.
5. Click the **PEM** tab to switch the displayed format (or keep JWK — Prowler accepts both).
6. Copy the entire `-----BEGIN PRIVATE KEY-----` block (or the JWK JSON).
7. Click **Done**, then **Save**.

<Warning>
Okta displays the private key **only once**. If you close the modal without copying, you must generate a new key.
</Warning>

![]()

### 5. Grant the required OAuth scopes

On the app, open the **Okta API Scopes** tab and click **Grant** on every scope Prowler needs. For the initial release, granting only `okta.policies.read` is sufficient.

![]()

### 6. Assign the Read-Only Administrator role

On the app, open the **Admin roles** tab and click **Edit assignments → Add assignment**:

- **Role:** Read-Only Administrator
- **Resources:** All resources

Save the changes.

![]()

### 7. [Optional] Verify DPoP setting

Prowler sends DPoP (Demonstrating Proof of Possession) proofs on every token request. The integration works whether the **Require Demonstrating Proof of Possession (DPoP) header in token requests** setting on the service app is on or off — but enabling it is the more secure default.

## Prowler CLI Authentication

### Using Environment Variables (Required for Secrets)

Private key material **must** be supplied via environment variables — Prowler does not accept secrets through CLI flags.

```bash
export OKTA_ORG_DOMAIN="YOUR-ORG.okta.com"
export OKTA_CLIENT_ID="0oa1234567890abcdef"

# Either of the two — content takes precedence over file when both are set.
export OKTA_PRIVATE_KEY_FILE="/secure/path/to/prowler-okta.pem"
# or
export OKTA_PRIVATE_KEY="$(cat /secure/path/to/prowler-okta.pem)"

# Optional — defaults to "okta.policies.read"
export OKTA_SCOPES="okta.policies.read"

poetry run python prowler-cli.py okta
```

### Non-Secret CLI Flags

Non-secret values are also available as CLI flags for ergonomic overrides:

| Flag | Equivalent env var |
|---|---|
| `--okta-org-domain` | `OKTA_ORG_DOMAIN` |
| `--okta-client-id` | `OKTA_CLIENT_ID` |
| `--okta-scopes` | `OKTA_SCOPES` |

Run a single check directly:

```bash
poetry run python prowler-cli.py okta --check signon_global_session_idle_timeout_15min
```

## Troubleshooting

### `OktaInvalidOrgDomainError`

The org domain must be `<org>.okta.com` (or `.oktapreview.com` / `.okta-emea.com` / `.okta-gov.com` / `.okta.mil` / `.okta-miltest.com` / `.trex-govcloud.com`). Pass the bare hostname only — no `https://` scheme, no path, no trailing slash. Custom (vanity) domains are not currently accepted.
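A quick local sanity check for the rule above (an illustrative sketch, not Prowler's actual validation code):

```python
# Hypothetical helper mirroring the accepted-domain rule described above.
_ALLOWED_SUFFIXES = (
    ".okta.com", ".oktapreview.com", ".okta-emea.com",
    ".okta-gov.com", ".okta.mil", ".okta-miltest.com", ".trex-govcloud.com",
)


def looks_like_valid_org_domain(domain: str) -> bool:
    # Bare hostname only: no scheme, no path, no trailing slash.
    if "/" in domain or ":" in domain:
        return False
    return domain.endswith(_ALLOWED_SUFFIXES)
```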
### `OktaPrivateKeyFileError`

The file at `OKTA_PRIVATE_KEY_FILE` is missing, unreadable, or empty. Confirm the path and that the file contains a non-empty PEM block or JWK JSON document.

### `OktaInvalidCredentialsError` at provider init

Prowler validates credentials at startup by listing one sign-on policy. This error indicates the credential material itself was rejected:

- **`invalid_client`** — the public key registered in Okta does not match the private key on disk. Generate a fresh keypair and try again.

### `OktaInsufficientPermissionsError` at provider init

Raised when the credential probe succeeds at the OAuth layer but the request is rejected because the service app lacks the required scope or admin role:

- **`invalid_scope`** — the `okta.policies.read` scope is not granted on the service app. Grant it from **Okta API Scopes**.
- **`Forbidden` / `not authorized`** — the **Read-Only Administrator** role is not assigned to the service app. Assign it from **Admin roles**.

### `invalid_dpop_proof`

The org or the service app requires DPoP. The provider always sends DPoP proofs, so this error indicates the SDK could not build a valid proof — typically because the private key on disk does not match the public key uploaded to Okta. Regenerate the keypair.

## Additional Resources

- [Implement OAuth 2.0 for an Okta service app](https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/)
- [Okta Policy API reference](https://developer.okta.com/docs/api/openapi/okta-management/management/tag/Policy/)
- [DISA STIG for Okta (V-273186)](https://stigviewer.com/stigs/okta/)
@@ -0,0 +1,144 @@
---
title: 'Getting Started With Okta on Prowler'
---

import { VersionBadge } from "/snippets/version-badge.mdx"

Prowler for Okta scans an Okta organization for identity and session-management misconfigurations. The provider authenticates as a service application using **OAuth 2.0 with a private-key JWT** (Client Credentials grant) — no end-user login, read-only by scope.

## Prerequisites

Set up authentication for Okta with the [Okta Authentication](/user-guide/providers/okta/authentication) guide before starting:

- An Okta organization. The UI examples below use **Identity Engine** terminology such as **Global Session Policy**; Classic Engine exposes the equivalent sign-on policy concepts under older names.
- A **Super Administrator** account on that organization for the one-time service-app setup.
- An **API Services** app integration in the Okta Admin Console with the `okta.policies.read` scope granted and the **Read-Only Administrator** role assigned.
- Python 3.10+ and Prowler 5.27.0 or later installed locally.

<CardGroup cols={2}>
  <Card title="Prowler Cloud" icon="cloud" href="#prowler-cloud">
    Onboard Okta using Prowler Cloud
  </Card>
  <Card title="Prowler CLI" icon="terminal" href="#prowler-cli">
    Onboard Okta using Prowler CLI
  </Card>
</CardGroup>

## Prowler Cloud

<Note>
Prowler Cloud onboarding for Okta is coming soon. Track the [Prowler GitHub repository](https://github.com/prowler-cloud/prowler) for release updates. Use the [Prowler CLI](#prowler-cli) workflow below in the meantime.
</Note>

---

## Prowler CLI

<VersionBadge version="5.27.0" />

### Step 1: Set Up Authentication

Follow the [Okta Authentication](/user-guide/providers/okta/authentication) guide to create the service application, generate a keypair, grant scopes, and assign the Read-Only Administrator role. Then export the credentials:

```bash
export OKTA_ORG_DOMAIN="acme.okta.com"
export OKTA_CLIENT_ID="0oa1234567890abcdef"
export OKTA_PRIVATE_KEY_FILE="/secure/path/to/prowler-okta.pem"
# Optional — defaults to "okta.policies.read"
export OKTA_SCOPES="okta.policies.read"
```

The private key file may contain either a PEM-encoded RSA key or a JWK JSON document.

#### Supplying the Private Key as Content

For automated environments where writing the key to disk is not desirable (CI runners, container secrets, etc.), the private key may be passed directly as a string:

```bash
export OKTA_ORG_DOMAIN="acme.okta.com"
export OKTA_CLIENT_ID="0oa1234567890abcdef"
export OKTA_PRIVATE_KEY="$(cat /secure/path/to/prowler-okta.pem)"
```

`OKTA_PRIVATE_KEY` takes precedence over `OKTA_PRIVATE_KEY_FILE` when both are set. The private key is intentionally not exposed as a CLI flag — secrets must be supplied via environment variables only.

### Step 2: Run the First Scan

Run a baseline scan after credentials are configured:

```bash
prowler okta
```

Or run a specific check directly:

```bash
prowler okta --check signon_global_session_idle_timeout_15min
```

Prowler prints a summary table; full findings are written to the configured output formats.

### Step 3: Use a Custom Configuration (Optional)

Prowler uses a configuration file to customize check thresholds. The Okta configuration currently includes:

```yaml
okta:
  # okta.signon_global_session_idle_timeout_15min
  # Defaults to 15 minutes per DISA STIG V-273186.
  okta_max_session_idle_minutes: 15
```

To use a custom configuration:

```bash
prowler okta --config-file /path/to/config.yaml
```

## Supported Services

Prowler for Okta includes security checks across the following services:

| Service | Description |
| ----------- | ----------------------------------------------------------------------------------- |
| **Sign-On** | Global session policy controls (idle timeout, lifetime, rule priority and ordering) |

## Troubleshooting

### STIG Rule Ordering

The initial check is mapped to DISA STIG `V-273186` / `OKTA-APP-000020`. Prowler implements the STIG procedure as written: the **Default Policy** must have a **Priority 1** rule that is **not** `Default Rule`, and that rule must set **Maximum Okta global session idle time** to 15 minutes or less.

This is stricter than simply finding the same timeout value somewhere else in the policy set. A compliant custom rule in another policy, or a compliant timeout on the built-in `Default Rule`, does not satisfy this STIG procedure.
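Expressed as code, the decision logic described above looks roughly like this (an illustrative sketch with simplified rule objects, not the actual check implementation):

```python
# Hypothetical model of the STIG V-273186 procedure described above.
from dataclasses import dataclass


@dataclass
class Rule:
    name: str
    priority: int
    max_session_idle_minutes: int


def stig_v273186_compliant(default_policy_rules: list[Rule], threshold: int = 15) -> bool:
    # Find the highest-priority rule on the Default Policy.
    top = min(default_policy_rules, key=lambda r: r.priority, default=None)
    if top is None or top.priority != 1:
        return False
    # The built-in Default Rule never satisfies the procedure,
    # even when its timeout is compliant.
    if top.name == "Default Rule":
        return False
    return top.max_session_idle_minutes <= threshold
```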
### Default Scopes

Prowler requests a fixed set of OAuth scopes on every token exchange. The default is a single scope that covers the bundled initial check:

- `okta.policies.read`

The service app must have that scope granted in the **Okta API Scopes** tab. When the granted set is narrower than the requested set, the token request fails with an `invalid_scope` error and the scan stops at provider initialization.

When additional checks are enabled — or when running against a service app that exposes a different scope set — override the default with `OKTA_SCOPES` (comma-separated string for the env var) or `--okta-scopes` (space-separated list for the CLI):

```bash
# Environment variable — comma-separated
export OKTA_SCOPES="okta.policies.read,okta.apps.read,okta.users.read"

# CLI flag — space-separated
prowler okta --okta-scopes okta.policies.read okta.apps.read okta.users.read
```

For the full catalog of OAuth scopes exposed by the Okta Management API, refer to the [Okta OAuth 2.0 scopes documentation](https://developer.okta.com/docs/api/oauth2/).

<Note>
As new services and checks land in the Okta provider, the default scope list grows alongside them. Re-check the granted scopes on the service app after each Prowler upgrade and grant any newly required `okta.*.read` scopes in the Admin Console.
</Note>

### Common Errors

- **`OktaInvalidOrgDomainError`** — the org domain must be `<org>.okta.com` (or `.oktapreview.com` / `.okta-emea.com` / `.okta-gov.com` / `.okta.mil` / `.okta-miltest.com` / `.trex-govcloud.com`). Pass the bare hostname only — no `https://` scheme, no path, no trailing slash.
- **`OktaPrivateKeyFileError`** — confirm the file is readable and contains a non-empty PEM or JWK body.
- **`OktaInsufficientPermissionsError`** — the credential probe reached Okta but the service app cannot perform the request. The error string carries `invalid_scope`, `Forbidden`, `not authorized`, or `permission`. Fix by granting the missing `okta.*.read` scope from **Okta API Scopes** and confirming the **Read-Only Administrator** role is assigned to the service app.
- **`OktaInvalidCredentialsError`** — the credential probe reached Okta but Okta rejected the JWT. Typically the private key on disk does not match the public JWK uploaded to the service app, or the JWT signing parameters are wrong. Regenerate the keypair and re-upload the public JWK.
- **Token requests failing for an unknown scope** — the app was granted a narrower scope set than `OKTA_SCOPES` requests. Either narrow `OKTA_SCOPES` or grant the missing scopes in the Admin Console.
(8 binary screenshots added for the Okta setup guide: 159 KiB, 134 KiB, 173 KiB, 127 KiB, 83 KiB, 78 KiB, 216 KiB, 56 KiB)
@@ -0,0 +1,51 @@
---
title: "Getting Started With Scaleway on Prowler"
---

Prowler for Scaleway scans IAM resources in your Scaleway organization for security misconfigurations. The current release ships one check that flags API keys still owned by the account root user.

## Prerequisites

1. A Scaleway organization with IAM access.
2. A Scaleway API key with at least the `IAMReadOnly` policy bound to a dedicated IAM user (do not use the account root user).
3. Your organization ID (visible at the top right of the Scaleway console).

## Authentication

Prowler reads credentials from the standard Scaleway environment variables:

| Variable | Purpose |
|---|---|
| `SCW_ACCESS_KEY` | API key access key |
| `SCW_SECRET_KEY` | API key secret key |
| `SCW_DEFAULT_ORGANIZATION_ID` | Required when the key bearer is an application; otherwise optional |
| `SCW_DEFAULT_PROJECT_ID` | Optional, default project for project-scoped resources |
| `SCW_DEFAULT_REGION` | Optional, defaults to `fr-par` |

Alternatively, pass them as CLI flags (`--access-key`, `--secret-key`, `--organization-id`, `--project-id`, `--region`). The CLI emits a warning when secrets are passed via the command line; environment variables are preferred.

## Run a scan

```bash
export SCW_ACCESS_KEY="SCW..."
export SCW_SECRET_KEY="..."
export SCW_DEFAULT_ORGANIZATION_ID="..."

prowler scaleway
```

To run only the IAM root-key check:

```bash
prowler scaleway --check iam_no_root_api_keys
```

## Checks shipped

| Check ID | Severity | Description |
|---|---|---|
| `iam_no_root_api_keys` | Critical | Fails when any Scaleway IAM API key is still owned by the account root user. |

## Required Scaleway permissions

The API key bearer needs read access to the IAM API in order to list users and API keys. The `IAMReadOnly` policy is sufficient. Refer to the [Scaleway IAM policy reference](https://www.scaleway.com/en/docs/identity-and-access-management/iam/reference-content/permission-sets/) for the full list of permissions.
@@ -4,6 +4,10 @@ All notable changes to the **Prowler MCP Server** are documented in this file.

## [0.7.0] (Prowler UNRELEASED)

### 🚀 Added

- MCP Server tools for Prowler Finding Groups Management [(#11140)](https://github.com/prowler-cloud/prowler/pull/11140)

### 🔐 Security

- `cryptography` from 46.0.1 to 47.0.0 (transitive) for CVE-2026-39892 and CVE-2026-26007 / CVE-2026-34073 [(#10978)](https://github.com/prowler-cloud/prowler/pull/10978)
@@ -10,6 +10,7 @@

Full access to Prowler Cloud platform and self-managed Prowler App for:
- **Findings Analysis**: Query, filter, and analyze security findings across all your cloud environments
- **Finding Groups Analysis**: Triage findings grouped by check ID and drill down into affected resources
- **Provider Management**: Create, configure, and manage your configured Prowler providers (AWS, Azure, GCP, etc.)
- **Scan Orchestration**: Trigger on-demand scans and schedule recurring security assessments
- **Resource Inventory**: Search and view detailed information about your audited resources

@@ -56,13 +57,21 @@ Prowler MCP Server can be used in three ways:
- Managed and maintained by Prowler team
- Always up-to-date

Install a reviewed version of `mcp-remote` in a dedicated local workspace first. Avoid running `npx mcp-remote` directly, because it can download and execute a new package version on each run.

```bash
mkdir -p ~/.local/share/prowler-mcp-bridge
cd ~/.local/share/prowler-mcp-bridge
npm init -y
npm install --save-exact mcp-remote@0.1.38
```

```json
{
  "mcpServers": {
    "prowler": {
      "command": "/absolute/path/to/.local/share/prowler-mcp-bridge/node_modules/.bin/mcp-remote",
      "args": [
        "https://mcp.prowler.com/mcp",
        "--header",
        "Authorization: Bearer pk_YOUR_API_KEY_HERE"
      ]
    }
  }
}
```
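To confirm which bridge version the config will actually run, inspect the pinned install with standard npm tooling (nothing Prowler-specific is assumed here):

```bash
cd ~/.local/share/prowler-mcp-bridge
npm ls mcp-remote  # expect mcp-remote@0.1.38
```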
@@ -0,0 +1,292 @@
"""Pydantic models for Prowler Finding Groups responses."""

from typing import Literal

from pydantic import Field

from prowler_mcp_server.prowler_app.models.base import MinimalSerializerMixin


FindingStatus = Literal["FAIL", "PASS", "MANUAL"]
FindingSeverity = Literal["critical", "high", "medium", "low", "informational"]
FindingDelta = Literal["new", "changed"]


def _attributes(data: dict) -> dict:
    return data.get("attributes", {})


def _counter(attributes: dict, key: str) -> int:
    return attributes.get(key) or 0


def _simplified_group_kwargs(data: dict) -> dict:
    attributes = _attributes(data)
    return {
        "check_id": attributes.get("check_id", data.get("id", "")),
        "check_title": attributes.get("check_title"),
        "severity": attributes.get("severity", "informational"),
        "status": attributes.get("status", "MANUAL"),
        "muted": attributes.get("muted", False),
        "impacted_providers": attributes.get("impacted_providers") or [],
        "resources_fail": _counter(attributes, "resources_fail"),
        "resources_total": _counter(attributes, "resources_total"),
        "pass_count": _counter(attributes, "pass_count"),
        "fail_count": _counter(attributes, "fail_count"),
        "manual_count": _counter(attributes, "manual_count"),
        "muted_count": _counter(attributes, "muted_count"),
        "new_count": _counter(attributes, "new_count"),
        "changed_count": _counter(attributes, "changed_count"),
        "first_seen_at": attributes.get("first_seen_at"),
        "last_seen_at": attributes.get("last_seen_at"),
        "failing_since": attributes.get("failing_since"),
    }


class SimplifiedFindingGroup(MinimalSerializerMixin):
    """Finding group summary optimized for browsing many checks."""

    check_id: str = Field(description="Public check ID that identifies this group")
    check_title: str | None = Field(
        default=None, description="Human-readable check title"
    )
    severity: FindingSeverity = Field(description="Highest severity in the group")
    status: FindingStatus = Field(description="Aggregated finding group status")
    muted: bool = Field(
        description="Whether all findings in this group are muted or accepted"
    )
    impacted_providers: list[str] = Field(
        default_factory=list,
        description="Provider types impacted by this finding group",
    )
    resources_fail: int = Field(
        description="Number of non-muted failing resources in this group", ge=0
    )
    resources_total: int = Field(
        description="Total number of resources in this group", ge=0
    )
    pass_count: int = Field(
        description="Number of non-muted PASS findings in this group", ge=0
    )
    fail_count: int = Field(
        description="Number of non-muted FAIL findings in this group", ge=0
    )
    manual_count: int = Field(
        description="Number of non-muted MANUAL findings in this group", ge=0
    )
    muted_count: int = Field(description="Total muted findings in this group", ge=0)
    new_count: int = Field(description="Number of new non-muted findings", ge=0)
    changed_count: int = Field(description="Number of changed non-muted findings", ge=0)
    first_seen_at: str | None = Field(
        default=None, description="First time this group was detected"
    )
    last_seen_at: str | None = Field(
        default=None, description="Last time this group was detected"
    )
    failing_since: str | None = Field(
        default=None, description="First time this group started failing"
    )

    @classmethod
    def from_api_response(cls, data: dict) -> "SimplifiedFindingGroup":
        """Transform JSON:API finding group response to simplified format."""
        return cls(**_simplified_group_kwargs(data))


class DetailedFindingGroup(SimplifiedFindingGroup):
    """Finding group with complete counters and descriptive context."""

    check_description: str | None = Field(
        default=None, description="Description of the check behind this group"
    )
    pass_muted_count: int = Field(description="Muted PASS findings", ge=0)
    fail_muted_count: int = Field(description="Muted FAIL findings", ge=0)
    manual_muted_count: int = Field(description="Muted MANUAL findings", ge=0)
    new_fail_count: int = Field(description="New non-muted FAIL findings", ge=0)
    new_fail_muted_count: int = Field(description="New muted FAIL findings", ge=0)
    new_pass_count: int = Field(description="New non-muted PASS findings", ge=0)
    new_pass_muted_count: int = Field(description="New muted PASS findings", ge=0)
    new_manual_count: int = Field(description="New non-muted MANUAL findings", ge=0)
    new_manual_muted_count: int = Field(description="New muted MANUAL findings", ge=0)
    changed_fail_count: int = Field(description="Changed non-muted FAIL findings", ge=0)
    changed_fail_muted_count: int = Field(
        description="Changed muted FAIL findings", ge=0
    )
    changed_pass_count: int = Field(description="Changed non-muted PASS findings", ge=0)
    changed_pass_muted_count: int = Field(
        description="Changed muted PASS findings", ge=0
    )
    changed_manual_count: int = Field(
        description="Changed non-muted MANUAL findings", ge=0
    )
    changed_manual_muted_count: int = Field(
        description="Changed muted MANUAL findings", ge=0
    )

    @classmethod
    def from_api_response(cls, data: dict) -> "DetailedFindingGroup":
        """Transform JSON:API finding group response to detailed format."""
        attributes = _attributes(data)

        return cls(
            **_simplified_group_kwargs(data),
            check_description=attributes.get("check_description"),
            pass_muted_count=_counter(attributes, "pass_muted_count"),
            fail_muted_count=_counter(attributes, "fail_muted_count"),
            manual_muted_count=_counter(attributes, "manual_muted_count"),
            new_fail_count=_counter(attributes, "new_fail_count"),
            new_fail_muted_count=_counter(attributes, "new_fail_muted_count"),
            new_pass_count=_counter(attributes, "new_pass_count"),
            new_pass_muted_count=_counter(attributes, "new_pass_muted_count"),
            new_manual_count=_counter(attributes, "new_manual_count"),
            new_manual_muted_count=_counter(attributes, "new_manual_muted_count"),
            changed_fail_count=_counter(attributes, "changed_fail_count"),
            changed_fail_muted_count=_counter(attributes, "changed_fail_muted_count"),
            changed_pass_count=_counter(attributes, "changed_pass_count"),
            changed_pass_muted_count=_counter(attributes, "changed_pass_muted_count"),
            changed_manual_count=_counter(attributes, "changed_manual_count"),
            changed_manual_muted_count=_counter(
                attributes, "changed_manual_muted_count"
            ),
        )


class FindingGroupsListResponse(MinimalSerializerMixin):
    """Paginated response for finding group list queries."""

    groups: list[SimplifiedFindingGroup] = Field(
        description="Finding groups matching the query"
    )
    total_num_groups: int = Field(
        description="Total groups matching the query across all pages", ge=0
    )
    total_num_pages: int = Field(description="Total pages available", ge=0)
    current_page: int = Field(description="Current page number", ge=1)

    @classmethod
    def from_api_response(cls, response: dict) -> "FindingGroupsListResponse":
        """Transform JSON:API list response to simplified format."""
        pagination = response.get("meta", {}).get("pagination", {})
        groups = [
            SimplifiedFindingGroup.from_api_response(item)
            for item in response.get("data", [])
        ]

        return cls(
            groups=groups,
            total_num_groups=pagination.get("count", len(groups)),
            total_num_pages=pagination.get("pages", 1),
            current_page=pagination.get("page", 1),
        )


class FindingGroupResourceInfo(MinimalSerializerMixin):
    """Nested resource information for a finding group row."""

    uid: str = Field(description="Provider-native resource UID")
    name: str = Field(description="Resource name")
    service: str = Field(description="Cloud service")
    region: str = Field(description="Cloud region")
    type: str = Field(description="Resource type")
    resource_group: str | None = Field(
        default=None, description="Provider resource group or equivalent"
    )

    @classmethod
    def from_api_response(cls, data: dict) -> "FindingGroupResourceInfo":
        """Transform nested resource data to simplified format."""
        return cls(
            uid=data.get("uid", ""),
            name=data.get("name", ""),
            service=data.get("service", ""),
            region=data.get("region", ""),
            type=data.get("type", ""),
            resource_group=data.get("resource_group"),
        )


class FindingGroupProviderInfo(MinimalSerializerMixin):
    """Nested provider information for a finding group resource row."""

    type: str = Field(description="Provider type")
    uid: str = Field(description="Provider-native account or subscription ID")
    alias: str | None = Field(default=None, description="Provider alias")

    @classmethod
    def from_api_response(cls, data: dict) -> "FindingGroupProviderInfo":
        """Transform nested provider data to simplified format."""
        return cls(
            type=data.get("type", ""),
            uid=data.get("uid", ""),
            alias=data.get("alias"),
        )


class FindingGroupResource(MinimalSerializerMixin):
    """Resource row affected by a finding group."""

    id: str = Field(description="Row identifier for this finding group resource")
    resource: FindingGroupResourceInfo = Field(description="Affected resource")
    provider: FindingGroupProviderInfo = Field(description="Affected provider")
    finding_id: str = Field(
        description="Finding UUID to use with prowler_app_get_finding_details"
    )
    status: FindingStatus = Field(description="Finding status for this resource")
    severity: FindingSeverity = Field(description="Finding severity")
    muted: bool = Field(description="Whether the finding is muted")
    delta: FindingDelta | None = Field(default=None, description="Change status")
    first_seen_at: str | None = Field(default=None, description="First seen time")
    last_seen_at: str | None = Field(default=None, description="Last seen time")
    muted_reason: str | None = Field(default=None, description="Mute reason")

    @classmethod
    def from_api_response(cls, data: dict) -> "FindingGroupResource":
        """Transform JSON:API finding group resource response."""
        attributes = _attributes(data)

        return cls(
            id=data.get("id", ""),
            resource=FindingGroupResourceInfo.from_api_response(
                attributes.get("resource") or {}
            ),
            provider=FindingGroupProviderInfo.from_api_response(
                attributes.get("provider") or {}
            ),
            finding_id=str(attributes.get("finding_id", "")),
            status=attributes.get("status", "MANUAL"),
            severity=attributes.get("severity", "informational"),
            muted=attributes.get("muted", False),
            delta=attributes.get("delta"),
            first_seen_at=attributes.get("first_seen_at"),
            last_seen_at=attributes.get("last_seen_at"),
            muted_reason=attributes.get("muted_reason"),
        )


class FindingGroupResourcesListResponse(MinimalSerializerMixin):
    """Paginated response for finding group resource queries."""

    resources: list[FindingGroupResource] = Field(
        description="Resources matching the finding group query"
    )
    total_num_resources: int = Field(
        description="Total resources matching the query across all pages", ge=0
    )
    total_num_pages: int = Field(description="Total pages available", ge=0)
    current_page: int = Field(description="Current page number", ge=1)

    @classmethod
    def from_api_response(cls, response: dict) -> "FindingGroupResourcesListResponse":
        """Transform JSON:API resource list response to simplified format."""
        pagination = response.get("meta", {}).get("pagination", {})
        resources = [
            FindingGroupResource.from_api_response(item)
            for item in response.get("data", [])
        ]

        return cls(
            resources=resources,
            total_num_resources=pagination.get("count", len(resources)),
            total_num_pages=pagination.get("pages", 1),
            current_page=pagination.get("page", 1),
        )
@@ -0,0 +1,471 @@
"""Finding Groups tools for Prowler App MCP Server.

This module provides read-only tools for finding group triage and drill-downs.
"""

from typing import Any, Literal
from urllib.parse import quote

from pydantic import Field

from prowler_mcp_server.prowler_app.models.finding_groups import (
    DetailedFindingGroup,
    FindingGroupResourcesListResponse,
    FindingGroupsListResponse,
)
from prowler_mcp_server.prowler_app.tools.base import BaseTool


StatusFilter = Literal["FAIL", "PASS", "MANUAL"]
SeverityFilter = Literal["critical", "high", "medium", "low", "informational"]
DeltaFilter = Literal["new", "changed"]

GROUP_DETAIL_FIELDS = (
    "check_id,check_title,check_description,severity,status,muted,"
    "impacted_providers,resources_fail,resources_total,pass_count,fail_count,"
    "manual_count,pass_muted_count,fail_muted_count,manual_muted_count,"
    "muted_count,new_count,changed_count,new_fail_count,new_fail_muted_count,"
    "new_pass_count,new_pass_muted_count,new_manual_count,new_manual_muted_count,"
    "changed_fail_count,changed_fail_muted_count,changed_pass_count,"
    "changed_pass_muted_count,changed_manual_count,changed_manual_muted_count,"
    "first_seen_at,last_seen_at,failing_since"
)

GROUP_LIST_FIELDS = (
    "check_id,check_title,severity,status,muted,impacted_providers,"
    "resources_fail,resources_total,pass_count,fail_count,manual_count,"
    "muted_count,new_count,changed_count,first_seen_at,last_seen_at,failing_since"
)

RESOURCE_FIELDS = (
    "resource,provider,finding_id,status,severity,muted,delta,"
    "first_seen_at,last_seen_at,muted_reason"
)


class FindingGroupsTools(BaseTool):
    """Tools for Finding Groups operations."""

    @staticmethod
    def _bool_value(value: bool | str) -> bool:
        """Normalize bool-like MCP client values."""
        if isinstance(value, bool):
            return value
        return value.lower() == "true"

    @staticmethod
    def _group_endpoint(date_range: tuple[str, str] | None) -> str:
        return "/finding-groups/latest" if date_range is None else "/finding-groups"

    @staticmethod
    def _resource_endpoint(check_id: str, date_range: tuple[str, str] | None) -> str:
        escaped_check_id = quote(check_id, safe="")
        if date_range is None:
            return f"/finding-groups/latest/{escaped_check_id}/resources"
        return f"/finding-groups/{escaped_check_id}/resources"

    def _base_date_params(
        self, date_from: str | None, date_to: str | None
    ) -> tuple[tuple[str, str] | None, dict[str, Any]]:
        date_range = self.api_client.normalize_date_range(
            date_from, date_to, max_days=2
        )
        if date_range is None:
            return None, {}

        return date_range, {
            "filter[inserted_at__gte]": date_range[0],
            "filter[inserted_at__lte]": date_range[1],
        }

    def _apply_common_filters(
        self,
        params: dict[str, Any],
        provider: list[str],
        provider_type: list[str],
        provider_uid: list[str],
        provider_alias: str | None,
        region: list[str],
        service: list[str],
        resource_type: list[str],
        resource_name: str | None,
        resource_uid: str | None,
        resource_group: list[str],
        category: list[str],
        check_id: list[str],
        check_title: str | None,
        severity: list[SeverityFilter],
        status: list[StatusFilter],
        muted: bool | str | None,
        delta: list[DeltaFilter],
    ) -> None:
        if provider:
            params["filter[provider__in]"] = provider
        if provider_type:
            params["filter[provider_type__in]"] = provider_type
        if provider_uid:
            params["filter[provider_uid__in]"] = provider_uid
        if provider_alias:
            params["filter[provider_alias__icontains]"] = provider_alias
        if region:
            params["filter[region__in]"] = region
        if service:
            params["filter[service__in]"] = service
        if resource_type:
            params["filter[resource_type__in]"] = resource_type
        if resource_name:
            params["filter[resource_name__icontains]"] = resource_name
        if resource_uid:
            params["filter[resource_uid__icontains]"] = resource_uid
        if resource_group:
            params["filter[resource_groups__in]"] = resource_group
        if category:
            params["filter[category__in]"] = category
        if check_id:
            params["filter[check_id__in]"] = check_id
        if check_title:
            params["filter[check_title__icontains]"] = check_title
        if severity:
            params["filter[severity__in]"] = severity
        if status:
            params["filter[status__in]"] = status
        if muted is not None:
            params["filter[muted]"] = self._bool_value(muted)
        if delta:
            params["filter[delta__in]"] = delta

    async def list_finding_groups(
        self,
        provider: list[str] = Field(
            default=[],
            description="Filter by provider UUIDs. Multiple values allowed. If empty, all visible providers are returned.",
        ),
        provider_type: list[str] = Field(
            default=[],
            description="Filter by provider type. Multiple values allowed, such as aws, azure, gcp, kubernetes, github, or m365.",
        ),
        provider_uid: list[str] = Field(
            default=[],
            description="Filter by provider-native account, subscription, or project IDs. Multiple values allowed.",
        ),
        provider_alias: str | None = Field(
            default=None,
            description="Filter by provider alias/name using partial matching.",
        ),
        region: list[str] = Field(
            default=[],
            description="Filter by cloud regions. Multiple values allowed.",
        ),
        service: list[str] = Field(
            default=[],
            description="Filter by cloud services. Multiple values allowed.",
        ),
        resource_type: list[str] = Field(
            default=[],
            description="Filter by resource types. Multiple values allowed.",
        ),
        resource_name: str | None = Field(
            default=None,
            description="Filter by resource name using partial matching.",
        ),
        resource_uid: str | None = Field(
            default=None,
            description="Filter by resource UID using partial matching.",
        ),
        resource_group: list[str] = Field(
            default=[],
            description="Filter by resource group values. Multiple values allowed.",
        ),
        category: list[str] = Field(
            default=[],
            description="Filter by finding categories. Multiple values allowed.",
        ),
        check_id: list[str] = Field(
            default=[],
            description="Filter by check IDs. Multiple values allowed.",
        ),
        check_title: str | None = Field(
            default=None,
            description="Filter by check title using partial matching.",
        ),
        severity: list[SeverityFilter] = Field(
            default=[],
            description="Filter by aggregated severity. Empty returns all severities.",
        ),
        status: list[StatusFilter] = Field(
            default=["FAIL"],
            description="Filter by aggregated status. Default returns failing groups. Pass [] to return all statuses.",
        ),
        muted: bool | str | None = Field(
            default=None,
            description="Filter by fully muted group state. Accepts true/false.",
        ),
        include_muted: bool | str = Field(
            default=False,
            description="When false, excludes fully muted groups. Set true to include fully muted groups.",
        ),
        delta: list[DeltaFilter] = Field(
            default=[],
            description="Filter by group delta values: new or changed.",
        ),
        date_from: str | None = Field(
            default=None,
            description="Start date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
        ),
        date_to: str | None = Field(
            default=None,
            description="End date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
        ),
        sort: str | None = Field(
            default=None,
            description="Optional sort expression supported by the finding-groups API, such as -fail_count,-severity,check_id.",
        ),
        page_size: int = Field(
            default=50, description="Number of groups to return per page"
        ),
        page_number: int = Field(
            default=1, description="Page number to retrieve (1-indexed)"
        ),
    ) -> dict[str, Any]:
        """List finding groups aggregated by check ID.

        Default behavior returns the latest non-muted FAIL groups for fast triage.
        Without dates this uses `/finding-groups/latest`. With `date_from` or
        `date_to`, this uses `/finding-groups` with a maximum 2-day date window.

        Use this tool to find noisy or high-impact checks, then call
        prowler_app_get_finding_group_details for complete counters or
        prowler_app_list_finding_group_resources to drill into affected resources.
        """
        try:
            self.api_client.validate_page_size(page_size)
            date_range, params = self._base_date_params(date_from, date_to)
            endpoint = self._group_endpoint(date_range)

            self._apply_common_filters(
                params,
                provider,
                provider_type,
                provider_uid,
                provider_alias,
                region,
                service,
                resource_type,
                resource_name,
                resource_uid,
                resource_group,
                category,
                check_id,
                check_title,
                severity,
                status,
                muted,
                delta,
            )

            params["filter[include_muted]"] = self._bool_value(include_muted)
            params["page[size]"] = page_size
            params["page[number]"] = page_number
            params["fields[finding-groups]"] = GROUP_LIST_FIELDS
            if sort:
                params["sort"] = sort

            clean_params = self.api_client.build_filter_params(params)
            api_response = await self.api_client.get(endpoint, params=clean_params)
            response = FindingGroupsListResponse.from_api_response(api_response)
            return response.model_dump()
        except Exception as e:
            self.logger.error(f"Error listing finding groups: {e}")
            return {"error": str(e), "status": "failed"}

    async def get_finding_group_details(
        self,
        check_id: str = Field(
            description="Public check ID that identifies the finding group. This is not a UUID."
        ),
        date_from: str | None = Field(
            default=None,
            description="Start date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
        ),
        date_to: str | None = Field(
            default=None,
            description="End date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
        ),
    ) -> dict[str, Any]:
        """Get complete details for one finding group by exact check ID.

        Uses `filter[check_id]` exact matching against latest data by default,
        or historical data when dates are provided. Fully muted groups are
        included by default so accepted risk does not look like a missing group.
        """
        try:
            date_range, params = self._base_date_params(date_from, date_to)
            endpoint = self._group_endpoint(date_range)

            params.update(
                {
                    "filter[check_id]": check_id,
                    "filter[include_muted]": True,
                    "page[size]": 1,
                    "page[number]": 1,
                    "fields[finding-groups]": GROUP_DETAIL_FIELDS,
                }
            )

            clean_params = self.api_client.build_filter_params(params)
            api_response = await self.api_client.get(endpoint, params=clean_params)
            data = api_response.get("data", [])

            if not data:
                return {
                    "error": f"Finding group '{check_id}' not found.",
                    "status": "not_found",
                }

            group = DetailedFindingGroup.from_api_response(data[0])
            return group.model_dump()
        except Exception as e:
            self.logger.error(f"Error getting finding group details: {e}")
            return {"error": str(e), "status": "failed"}

    async def list_finding_group_resources(
        self,
        check_id: str = Field(
            description="Public check ID that identifies the finding group. This is not a UUID."
        ),
        provider: list[str] = Field(
            default=[],
            description="Filter by provider UUIDs. Multiple values allowed.",
        ),
        provider_type: list[str] = Field(
            default=[],
            description="Filter by provider type. Multiple values allowed.",
        ),
        provider_uid: list[str] = Field(
            default=[],
            description="Filter by provider-native account, subscription, or project IDs. Multiple values allowed.",
        ),
        provider_alias: str | None = Field(
            default=None,
            description="Filter by provider alias/name using partial matching.",
        ),
        region: list[str] = Field(
            default=[],
            description="Filter by cloud regions. Multiple values allowed.",
        ),
        service: list[str] = Field(
            default=[],
            description="Filter by cloud services. Multiple values allowed.",
        ),
        resource_type: list[str] = Field(
            default=[],
            description="Filter by resource types. Multiple values allowed.",
        ),
        resource_name: str | None = Field(
            default=None,
            description="Filter by resource name using partial matching.",
        ),
        resource_uid: str | None = Field(
            default=None,
            description="Filter by resource UID using partial matching.",
        ),
        resource_group: list[str] = Field(
            default=[],
            description="Filter by resource group values. Multiple values allowed.",
        ),
        category: list[str] = Field(
            default=[],
            description="Filter by finding categories. Multiple values allowed.",
        ),
        severity: list[SeverityFilter] = Field(
            default=[],
            description="Filter by severity. Empty returns all severities.",
        ),
        status: list[StatusFilter] = Field(
            default=["FAIL"],
            description="Filter by status. Default returns failing resources. Pass [] to return all statuses.",
        ),
        muted: bool | str | None = Field(
            default=None,
            description="Filter by muted state. Accepts true/false. Overrides include_muted when provided.",
        ),
        include_muted: bool | str = Field(
            default=False,
            description="When false, returns only actionable unmuted resources by applying muted=false. Set true to include muted and unmuted resources.",
        ),
        delta: list[DeltaFilter] = Field(
            default=[], description="Filter by delta values: new or changed."
        ),
        date_from: str | None = Field(
            default=None,
            description="Start date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
        ),
        date_to: str | None = Field(
            default=None,
            description="End date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
        ),
        sort: str | None = Field(
            default=None,
            description="Optional sort expression supported by the finding group resources API.",
        ),
        page_size: int = Field(
            default=50, description="Number of resources to return per page"
        ),
        page_number: int = Field(
            default=1, description="Page number to retrieve (1-indexed)"
        ),
    ) -> dict[str, Any]:
        """List resources affected by a finding group.

        Without dates this uses `/finding-groups/latest/{check_id}/resources`.
        With `date_from` or `date_to`, this uses
        `/finding-groups/{check_id}/resources` with a maximum 2-day date window.

        Default behavior returns FAIL, unmuted resources so the result is
        actionable. Set `include_muted=True` to include accepted/suppressed
        resources too. Each row includes nested resource and provider data plus
        `finding_id`. Use `prowler_app_get_finding_details(finding_id)` to
        retrieve complete remediation guidance for a specific resource finding.
        """
        try:
            self.api_client.validate_page_size(page_size)
            date_range, params = self._base_date_params(date_from, date_to)
            endpoint = self._resource_endpoint(check_id, date_range)

            if muted is None and not self._bool_value(include_muted):
                muted = False

            self._apply_common_filters(
                params,
                provider,
                provider_type,
                provider_uid,
                provider_alias,
                region,
                service,
                resource_type,
                resource_name,
                resource_uid,
                resource_group,
                category,
                [],
                None,
                severity,
                status,
                muted,
                delta,
            )

            params["page[size]"] = page_size
            params["page[number]"] = page_number
            params["fields[finding-group-resources]"] = RESOURCE_FIELDS
            if sort:
                params["sort"] = sort

            clean_params = self.api_client.build_filter_params(params)
            api_response = await self.api_client.get(endpoint, params=clean_params)
            response = FindingGroupResourcesListResponse.from_api_response(api_response)
            return response.model_dump()
        except Exception as e:
            self.logger.error(f"Error listing finding group resources: {e}")
            return {"error": str(e), "status": "failed"}
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.3.2 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.3.4 and should not be changed by hand.

[[package]]
name = "about-time"
@@ -12,6 +12,19 @@ files = [
    {file = "about_time-4.2.1-py3-none-any.whl", hash = "sha256:8bbf4c75fe13cbd3d72f49a03b02c5c7dca32169b6d49117c257e7eb3eaee341"},
]

[[package]]
name = "aenum"
version = "3.1.17"
description = "Advanced Enumerations (compatible with Python's stdlib Enum), NamedTuples, and NamedConstants"
optional = false
python-versions = "*"
groups = ["main"]
files = [
    {file = "aenum-3.1.17-py2-none-any.whl", hash = "sha256:0dad0421b2fbe30e3fb623b2a0a23eff823407df53829d6a72595e7f76f3d872"},
    {file = "aenum-3.1.17-py3-none-any.whl", hash = "sha256:8b883a37a04e74cc838ac442bdd28c266eae5bbf13e1342c7ef123ed25230139"},
    {file = "aenum-3.1.17.tar.gz", hash = "sha256:a969a4516b194895de72c875ece355f17c0d272146f7fda346ef74f93cf4d5ba"},
]

[[package]]
name = "aiofiles"
version = "24.1.0"
@@ -3144,6 +3157,22 @@ files = [
[package.dependencies]
referencing = ">=0.31.0"

[[package]]
name = "jwcrypto"
version = "1.5.7"
description = "Implementation of JOSE Web standards"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
    {file = "jwcrypto-1.5.7-py3-none-any.whl", hash = "sha256:729463fefe28b6de5cf1ebfda3e94f1a1b41d2799148ef98a01cb9678ebe2bb0"},
    {file = "jwcrypto-1.5.7.tar.gz", hash = "sha256:70204d7cca406eda8c82352e3c41ba2d946610dafd19e54403f0a1f4f18633c6"},
]

[package.dependencies]
cryptography = ">=3.4"
typing_extensions = ">=4.5.0"

[[package]]
name = "keystoneauth1"
version = "5.13.0"
@@ -4112,6 +4141,35 @@ urllib3 = {version = ">=2.6.3", markers = "python_version >= \"3.10.0\""}
[package.extras]
adk = ["docstring-parser (>=0.16) ; python_version >= \"3.10\" and python_version < \"4\"", "mcp (>=1.6.0) ; python_version >= \"3.10\" and python_version < \"4\"", "pydantic (>=2.10.6) ; python_version >= \"3.10\" and python_version < \"4\"", "rich (>=13.9.4) ; python_version >= \"3.10\" and python_version < \"4\""]

[[package]]
name = "okta"
version = "3.4.2"
description = "Python SDK for the Okta Management API"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "okta-3.4.2-py3-none-any.whl", hash = "sha256:b67bcff31de65223c5848894a202153236d0c99e3a8541a54bf7065f81676637"},
    {file = "okta-3.4.2.tar.gz", hash = "sha256:b05201056f3f028c5d2d16394f9b47024a689080f5a993c11d4d80f0e1b5ba1e"},
]

[package.dependencies]
aenum = ">=3.1.16"
aiohttp = ">=3.13.4"
blinker = ">=1.9.0"
jwcrypto = ">=1.5.6"
pycryptodomex = ">=3.23.0"
pydantic = ">=2.11.3"
pydash = ">=8.0.6"
PyJWT = ">=2.12.0"
python-dateutil = ">=2.9.0.post0"
PyYAML = ">=6.0.3"
requests = ">=2.33.0"
xmltodict = ">=1.0.2"

[package.extras]
images = ["pillow (>=9.0.0,<12)"]

[[package]]
name = "openapi-schema-validator"
version = "0.6.3"
@@ -4752,6 +4810,57 @@ files = [
    {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
]

[[package]]
name = "pycryptodomex"
version = "3.23.0"
description = "Cryptographic library for Python"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
groups = ["main"]
files = [
    {file = "pycryptodomex-3.23.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:add243d204e125f189819db65eed55e6b4713f70a7e9576c043178656529cec7"},
    {file = "pycryptodomex-3.23.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1c6d919fc8429e5cb228ba8c0d4d03d202a560b421c14867a65f6042990adc8e"},
    {file = "pycryptodomex-3.23.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:1c3a65ad441746b250d781910d26b7ed0a396733c6f2dbc3327bd7051ec8a541"},
    {file = "pycryptodomex-3.23.0-cp27-cp27m-win32.whl", hash = "sha256:47f6d318fe864d02d5e59a20a18834819596c4ed1d3c917801b22b92b3ffa648"},
    {file = "pycryptodomex-3.23.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:d9825410197a97685d6a1fa2a86196430b01877d64458a20e95d4fd00d739a08"},
    {file = "pycryptodomex-3.23.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:267a3038f87a8565bd834317dbf053a02055915acf353bf42ededb9edaf72010"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:7b37e08e3871efe2187bc1fd9320cc81d87caf19816c648f24443483005ff886"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:91979028227543010d7b2ba2471cf1d1e398b3f183cb105ac584df0c36dac28d"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8962204c47464d5c1c4038abeadd4514a133b28748bcd9fa5b6d62e3cec6fa"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a33986a0066860f7fcf7c7bd2bc804fa90e434183645595ae7b33d01f3c91ed8"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7947ab8d589e3178da3d7cdeabe14f841b391e17046954f2fbcd941705762b5"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c25e30a20e1b426e1f0fa00131c516f16e474204eee1139d1603e132acffc314"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:da4fa650cef02db88c2b98acc5434461e027dce0ae8c22dd5a69013eaf510006"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:58b851b9effd0d072d4ca2e4542bf2a4abcf13c82a29fd2c93ce27ee2a2e9462"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:a9d446e844f08299236780f2efa9898c818fe7e02f17263866b8550c7d5fb328"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:bc65bdd9fc8de7a35a74cab1c898cab391a4add33a8fe740bda00f5976ca4708"},
    {file = "pycryptodomex-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:c885da45e70139464f082018ac527fdaad26f1657a99ee13eecdce0f0ca24ab4"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:06698f957fe1ab229a99ba2defeeae1c09af185baa909a31a5d1f9d42b1aaed6"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2c2537863eccef2d41061e82a881dcabb04944c5c06c5aa7110b577cc487545"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43c446e2ba8df8889e0e16f02211c25b4934898384c1ec1ec04d7889c0333587"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f489c4765093fb60e2edafdf223397bc716491b2b69fe74367b70d6999257a5c"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdc69d0d3d989a1029df0eed67cc5e8e5d968f3724f4519bd03e0ec68df7543c"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6bbcb1dd0f646484939e142462d9e532482bc74475cecf9c4903d4e1cd21f003"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:8a4fcd42ccb04c31268d1efeecfccfd1249612b4de6374205376b8f280321744"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:55ccbe27f049743a4caf4f4221b166560d3438d0b1e5ab929e07ae1702a4d6fd"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-win32.whl", hash = "sha256:189afbc87f0b9f158386bf051f720e20fa6145975f1e76369303d0f31d1a8d7c"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:52e5ca58c3a0b0bd5e100a9fbc8015059b05cffc6c66ce9d98b4b45e023443b9"},
    {file = "pycryptodomex-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:02d87b80778c171445d67e23d1caef279bf4b25c3597050ccd2e13970b57fd51"},
    {file = "pycryptodomex-3.23.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:febec69c0291efd056c65691b6d9a339f8b4bc43c6635b8699471248fe897fea"},
    {file = "pycryptodomex-3.23.0-pp27-pypy_73-win32.whl", hash = "sha256:c84b239a1f4ec62e9c789aafe0543f0594f0acd90c8d9e15bcece3efe55eca66"},
    {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ebfff755c360d674306e5891c564a274a47953562b42fb74a5c25b8fc1fb1cb5"},
    {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eca54f4bb349d45afc17e3011ed4264ef1cc9e266699874cdd1349c504e64798"},
    {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2596e643d4365e14d0879dc5aafe6355616c61c2176009270f3048f6d9a61f"},
    {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdfac7cda115bca3a5abb2f9e43bc2fb66c2b65ab074913643803ca7083a79ea"},
    {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:14c37aaece158d0ace436f76a7bb19093db3b4deade9797abfc39ec6cd6cc2fe"},
    {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7de1e40a41a5d7f1ac42b6569b10bcdded34339950945948529067d8426d2785"},
    {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bffc92138d75664b6d543984db7893a628559b9e78658563b0395e2a5fb47ed9"},
    {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df027262368334552db2c0ce39706b3fb32022d1dce34673d0f9422df004b96a"},
    {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e79f1aaff5a3a374e92eb462fa9e598585452135012e2945f96874ca6eeb1ff"},
    {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:27e13c80ac9a0a1d050ef0a7e0a18cc04c8850101ec891815b6c5a0375e8a245"},
    {file = "pycryptodomex-3.23.0.tar.gz", hash = "sha256:71909758f010c82bc99b0abf4ea12012c98962fbf0583c2164f8b84533c2e4da"},
]

[[package]]
name = "pydantic"
version = "2.12.5"
@@ -4908,6 +5017,24 @@ files = [
[package.dependencies]
typing-extensions = ">=4.14.1"

[[package]]
name = "pydash"
version = "8.0.6"
description = "The kitchen sink of Python utility libraries for doing \"stuff\" in a functional way. Based on the Lo-Dash Javascript library."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "pydash-8.0.6-py3-none-any.whl", hash = "sha256:ee70a81a5b292c007f28f03a4ee8e75c1f5d7576df5457b836ec7ab2839cc5d0"},
    {file = "pydash-8.0.6.tar.gz", hash = "sha256:b2821547e9723f69cf3a986be4db64de41730be149b2641947ecd12e1e11025a"},
]

[package.dependencies]
typing-extensions = ">3.10,<4.6.0 || >4.6.0"

[package.extras]
dev = ["build", "coverage", "furo", "invoke", "mypy", "pytest", "pytest-cov", "pytest-mypy-testing", "ruff", "sphinx", "sphinx-autodoc-typehints", "tox", "twine", "wheel"]

[[package]]
name = "pyflakes"
version = "3.2.0"
@@ -5240,65 +5367,85 @@ files = [

[[package]]
name = "pyyaml"
version = "6.0.2"
version = "6.0.3"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
files = [
    {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
    {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
    {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
    {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
    {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
    {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
    {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
    {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
    {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
    {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
    {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
    {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
    {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
    {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
    {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
    {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
    {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
    {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
    {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
    {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
    {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
    {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
    {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
    {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
    {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
    {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
    {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
    {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
    {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
    {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
    {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
    {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
    {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
    {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
    {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
    {file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"},
    {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"},
    {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"},
    {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"},
    {file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"},
    {file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"},
    {file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"},
    {file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"},
    {file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"},
    {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"},
    {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198"},
    {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b"},
    {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0"},
    {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69"},
    {file = "pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e"},
    {file = "pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c"},
    {file = "pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e"},
    {file = "pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824"},
    {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c"},
    {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00"},
    {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d"},
    {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a"},
    {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4"},
    {file = "pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b"},
    {file = "pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf"},
    {file = "pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196"},
    {file = "pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0"},
    {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28"},
    {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c"},
    {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc"},
    {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e"},
    {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea"},
    {file = "pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5"},
    {file = "pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b"},
    {file = "pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd"},
    {file = "pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8"},
    {file = "pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1"},
    {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c"},
    {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5"},
    {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6"},
    {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6"},
    {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be"},
    {file = "pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26"},
    {file = "pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c"},
    {file = "pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb"},
    {file = "pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac"},
    {file = "pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310"},
    {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7"},
    {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788"},
    {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5"},
    {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764"},
    {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35"},
    {file = "pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac"},
    {file = "pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3"},
    {file = "pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007"},
|
||||
{file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5896,6 +6043,38 @@ pydantic = ">=2.6.0"
ruamel-yaml = ">=0.17.21"
typing-extensions = ">=4.7.1"

[[package]]
name = "scaleway"
version = "2.10.3"
description = "Scaleway SDK for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "scaleway-2.10.3-py3-none-any.whl", hash = "sha256:dbf381440d6caf37c878cf16445a63f4969a4aac2257c9b72c744d10ff223a0c"},
    {file = "scaleway-2.10.3.tar.gz", hash = "sha256:b1f9dd1b1450767205234c6f5a345e5e25dc039c780253d698893b5c344ce594"},
]

[package.dependencies]
scaleway-core = "2.10.3"

[[package]]
name = "scaleway-core"
version = "2.10.3"
description = "Scaleway SDK for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "scaleway_core-2.10.3-py3-none-any.whl", hash = "sha256:fd4112144554d6adae22ff737555eeb0e38cb1063250b3e88c9aebc1b957793b"},
    {file = "scaleway_core-2.10.3.tar.gz", hash = "sha256:56432f755d694669429de51d51c1d0b3361b28dc2f939b28e4cb954610ee76be"},
]

[package.dependencies]
python-dateutil = ">=2.8.2,<3.0.0"
PyYAML = ">=6.0,<7.0"
requests = ">=2.28.1,<3.0.0"

[[package]]
name = "schema"
version = "0.7.5"
@@ -6447,16 +6626,19 @@ files = [

[[package]]
name = "xmltodict"
version = "0.14.2"
version = "1.0.4"
description = "Makes working with XML feel like you are working with JSON"
optional = false
python-versions = ">=3.6"
groups = ["dev"]
python-versions = ">=3.9"
groups = ["main", "dev"]
files = [
    {file = "xmltodict-0.14.2-py2.py3-none-any.whl", hash = "sha256:20cc7d723ed729276e808f26fb6b3599f786cbc37e06c65e192ba77c40f20aac"},
    {file = "xmltodict-0.14.2.tar.gz", hash = "sha256:201e7c28bb210e374999d1dde6382923ab0ed1a8a5faeece48ab525b7810a553"},
    {file = "xmltodict-1.0.4-py3-none-any.whl", hash = "sha256:a4a00d300b0e1c59fc2bfccb53d7b2e88c32f200df138a0dd2229f842497026a"},
    {file = "xmltodict-1.0.4.tar.gz", hash = "sha256:6d94c9f834dd9e44514162799d344d815a3a4faec913717a9ecbfa5be1bb8e61"},
]

[package.extras]
test = ["pytest", "pytest-cov"]

[[package]]
name = "yarl"
version = "1.20.1"
@@ -6735,4 +6917,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.13"
content-hash = "d7e2ad41783a864bb845f63ccc10c88ae1e4ac36d61993ea106bbb4a5f58a843"
content-hash = "e158ae9902d799a82e7d91cb4c0eb404d811ae3460310192fbdd198727e647cd"

@@ -14,15 +14,19 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST:
| Action | Skill |
|--------|-------|
| Add changelog entry for a PR or feature | `prowler-changelog` |
| Adding a compliance output formatter (per-provider class + table dispatcher) | `prowler-compliance` |
| Adding new providers | `prowler-provider` |
| Adding services to existing providers | `prowler-provider` |
| Auditing check-to-requirement mappings as a cloud auditor | `prowler-compliance` |
| Create PR that requires changelog entry | `prowler-changelog` |
| Creating new checks | `prowler-sdk-check` |
| Creating/updating compliance frameworks | `prowler-compliance` |
| Fixing compliance JSON bugs (duplicate IDs, empty Section, stale refs) | `prowler-compliance` |
| Mapping checks to compliance controls | `prowler-compliance` |
| Mocking AWS with moto in tests | `prowler-test-sdk` |
| Review changelog format and conventions | `prowler-changelog` |
| Reviewing compliance framework PRs | `prowler-compliance-review` |
| Syncing compliance framework with upstream catalog | `prowler-compliance` |
| Update CHANGELOG.md in any component | `prowler-changelog` |
| Updating existing checks and metadata | `prowler-sdk-check` |
| Writing Prowler SDK tests | `prowler-test-sdk` |

@@ -9,10 +9,21 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `entra_service_principal_no_secrets_for_permanent_tier0_roles` check for M365 provider [(#10788)](https://github.com/prowler-cloud/prowler/pull/10788)
- `iam_user_access_not_stale_to_sagemaker` check for AWS provider with configurable `max_unused_sagemaker_access_days` (default 90) [(#11000)](https://github.com/prowler-cloud/prowler/pull/11000)
- `cloudtrail_bedrock_logging_enabled` check for AWS provider [(#10858)](https://github.com/prowler-cloud/prowler/pull/10858)
- Okta provider with OAuth 2.0 authentication and `signon_global_session_idle_timeout_15min` check [(#11079)](https://github.com/prowler-cloud/prowler/pull/11079)
- Scaleway provider with `iam_no_root_api_keys` check [(#11166)](https://github.com/prowler-cloud/prowler/pull/11166)

### 🔄 Changed

- `entra_emergency_access_exclusion` check for M365 provider now scopes the exclusion requirement to enabled Conditional Access policies with a `Block` grant control instead of every enabled policy, focusing on the lockout-relevant policy set [(#10849)](https://github.com/prowler-cloud/prowler/pull/10849)
- AWS IAM customer-managed policy checks no longer emit `FAIL` on unattached policies unless `--scan-unused-services` is enabled [(#11150)](https://github.com/prowler-cloud/prowler/pull/11150)

---

## [5.26.2] (Prowler UNRELEASED)

### 🐞 Fixed

- `entra_users_mfa_capable` and `entra_break_glass_account_fido2_security_key_registered` report a preventive FAIL per affected user (with the missing permission named) when the M365 service principal lacks `AuditLog.Read.All`, instead of mass false positives [(#10907)](https://github.com/prowler-cloud/prowler/pull/10907)

---


@@ -154,8 +154,10 @@ from prowler.providers.llm.models import LLMOutputOptions
from prowler.providers.m365.models import M365OutputOptions
from prowler.providers.mongodbatlas.models import MongoDBAtlasOutputOptions
from prowler.providers.nhn.models import NHNOutputOptions
from prowler.providers.okta.models import OktaOutputOptions
from prowler.providers.openstack.models import OpenStackOutputOptions
from prowler.providers.oraclecloud.models import OCIOutputOptions
from prowler.providers.scaleway.models import ScalewayOutputOptions
from prowler.providers.vercel.models import VercelOutputOptions


@@ -426,6 +428,14 @@ def prowler():
        output_options = VercelOutputOptions(
            args, bulk_checks_metadata, global_provider.identity
        )
    elif provider == "okta":
        output_options = OktaOutputOptions(
            args, bulk_checks_metadata, global_provider.identity
        )
    elif provider == "scaleway":
        output_options = ScalewayOutputOptions(
            args, bulk_checks_metadata, global_provider.identity
        )

    # Run the quick inventory for the provider if available
    if hasattr(args, "quick_inventory") and args.quick_inventory:

@@ -75,7 +75,9 @@ class Provider(str, Enum):
    ALIBABACLOUD = "alibabacloud"
    OPENSTACK = "openstack"
    IMAGE = "image"
    SCALEWAY = "scaleway"
    VERCEL = "vercel"
    OKTA = "okta"


# Compliance

@@ -649,3 +649,11 @@ vercel:
    - "_PASSWORD"
    - "_API_KEY"
    - "_PRIVATE_KEY"

okta:
  # Okta Sign-On Policies
  # okta.signon_global_session_idle_timeout_15min
  # Maximum acceptable Global Session idle timeout, in minutes. Defaults to
  # 15 per DISA STIG V-273186 (OKTA-APP-000020); raise it only with an
  # explicit risk acceptance.
  okta_max_session_idle_minutes: 15

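For orientation, a minimal sketch (not part of the diff) of how an Okta check could consume this knob through the provider's audit config, following the pattern other Prowler checks use; `signon_client` and the `session_idle_minutes` attribute are invented for the example:

    # Illustrative only: read the configured ceiling with the documented default.
    max_idle = signon_client.audit_config.get("okta_max_session_idle_minutes", 15)
    status = "PASS" if policy.session_idle_minutes <= max_idle else "FAIL"
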
@@ -0,0 +1,19 @@
### Account, Check and/or Region can be * to apply for all the cases.
### Account == <Okta organization domain, e.g. acme.okta.com>
### Bare domain only — no scheme, no path, no trailing slash.
### Region is always "*" — Okta has no regional concept.
### Resources matches against the policy name (e.g. "Default Policy"), not the id.
### Resources and Tags are lists that can have either Regex or Keywords.
### Tags is an optional list that matches on tuples of 'key=value' that are "ANDed" together.
### Use an alternation Regex to match one of multiple tags with "ORed" logic.
### For each check you can except Accounts, Regions, Resources and/or Tags.
########################### MUTELIST EXAMPLE ###########################
Mutelist:
  Accounts:
    "acme.okta.com":
      Checks:
        "signon_global_session_idle_timeout_15min":
          Regions:
            - "*"
          Resources:
            - "Default Policy"
@@ -741,10 +741,18 @@ def execute(
        is_finding_muted_args["team_id"] = (
            team.id if team else global_provider.identity.user_id
        )
    elif global_provider.type == "scaleway":
        is_finding_muted_args["organization_id"] = (
            global_provider.identity.organization_id
        )
    elif global_provider.type == "oraclecloud":
        is_finding_muted_args["tenancy_id"] = (
            global_provider.identity.tenancy_id
        )
    elif global_provider.type == "okta":
        is_finding_muted_args["org_domain"] = (
            global_provider.identity.org_domain
        )
    for finding in check_findings:
        if global_provider.type == "cloudflare":
            is_finding_muted_args["account_id"] = finding.account_id

@@ -933,6 +933,41 @@ class CheckReportGithub(Check_Report):
        )


@dataclass
class CheckReportOkta(Check_Report):
    """Contains the Okta Check's finding information."""

    resource_name: str
    resource_id: str
    org_domain: str
    region: str

    def __init__(
        self,
        metadata: Dict,
        resource: Any,
        resource_name: str = None,
        resource_id: str = None,
        org_domain: str = None,
        region: str = "global",
    ) -> None:
        """Initialize the Okta Check's finding information.

        Args:
            metadata: The metadata of the check.
            resource: Basic information about the resource.
            resource_name: The name of the resource related to the finding.
            resource_id: The id of the resource related to the finding.
            org_domain: The Okta organization domain related to the finding.
            region: Always "global" — Okta has no regional concept.
        """
        super().__init__(metadata, resource)
        self.resource_name = resource_name or getattr(resource, "name", "")
        self.resource_id = resource_id or getattr(resource, "id", "")
        self.org_domain = org_domain or getattr(resource, "org_domain", "")
        self.region = region


@dataclass
class CheckReportGoogleWorkspace(Check_Report):
    """Contains the Google Workspace Check's finding information."""
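As a usage sketch, a report as a check might build it; the `policy` object and `signon_client` below are invented for illustration and not taken from the diff:

    # Hypothetical check body: one report per audited sign-on policy.
    report = CheckReportOkta(
        metadata=self.metadata(),
        resource=policy,  # any object exposing .name / .id
        org_domain=signon_client.provider.identity.org_domain,
    )
    report.status = "PASS"
    report.status_extended = f"Policy {report.resource_name} enforces the required idle timeout."
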
@@ -1283,6 +1318,53 @@ class CheckReportVercel(Check_Report):
        return "global"


class CheckReportScaleway(Check_Report):
    """Contains the Scaleway Check's finding information.

    Scaleway scans run at the organization level. Most IAM/account-level
    resources are global; regional resources expose a ``region`` attribute
    on the underlying object, which we surface as the report ``region``.
    """

    resource_name: str
    resource_id: str
    organization_id: str

    def __init__(
        self,
        metadata: Dict,
        resource: Any,
        resource_name: str = None,
        resource_id: str = None,
        organization_id: str = None,
    ) -> None:
        """Initialize the Scaleway Check's finding information.

        Args:
            metadata: Check metadata dictionary.
            resource: The Scaleway resource being checked.
            resource_name: Override for resource name.
            resource_id: Override for resource ID.
            organization_id: Override for the organization ID.
        """
        super().__init__(metadata, resource)
        self.resource_name = resource_name or getattr(
            resource, "name", getattr(resource, "resource_name", "")
        )
        self.resource_id = resource_id or getattr(
            resource, "id", getattr(resource, "resource_id", "")
        )
        self.organization_id = organization_id or getattr(
            resource, "organization_id", ""
        )
        self._region = getattr(resource, "region", None) or "global"

    @property
    def region(self) -> str:
        """Scaleway regional resources expose their own region; IAM is global."""
        return self._region


# Testing Pending
def load_check_metadata(metadata_file: str) -> CheckMetadata:
    """

@@ -29,10 +29,10 @@ class ProwlerArgumentParser:
        self.parser = argparse.ArgumentParser(
            prog="prowler",
            formatter_class=RawTextHelpFormatter,
            usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel,dashboard,iac,image,llm} ...",
            usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,scaleway,vercel,dashboard,iac,image,llm} ...",
            epilog="""
Available Cloud Providers:
  {aws,azure,gcp,kubernetes,m365,github,googleworkspace,iac,llm,image,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel}
  {aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,iac,llm,image,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,scaleway,vercel}
    aws                 AWS Provider
    azure               Azure Provider
    gcp                 GCP Provider
@@ -40,6 +40,7 @@ Available Cloud Providers:
    m365                Microsoft 365 Provider
    github              GitHub Provider
    googleworkspace     Google Workspace Provider
    okta                Okta Provider
    cloudflare          Cloudflare Provider
    oraclecloud         Oracle Cloud Infrastructure Provider
    openstack           OpenStack Provider
@@ -49,6 +50,7 @@
    image               Container Image Provider
    nhn                 NHN Provider (Unofficial)
    mongodbatlas        MongoDB Atlas Provider
    scaleway            Scaleway Provider
    vercel              Vercel Provider

Available components:

@@ -427,6 +427,33 @@ class Finding(BaseModel):
            output_data["resource_uid"] = check_output.resource_id
            output_data["region"] = "global"

        elif provider.type == "okta":
            output_data["auth_method"] = provider.auth_method
            output_data["account_uid"] = get_nested_attribute(
                provider, "identity.org_domain"
            )
            output_data["account_name"] = get_nested_attribute(
                provider, "identity.org_domain"
            )
            output_data["account_organization_uid"] = get_nested_attribute(
                provider, "identity.client_id"
            )
            output_data["resource_name"] = check_output.resource_name
            output_data["resource_uid"] = check_output.resource_id
            output_data["region"] = "global"

        elif provider.type == "scaleway":
            output_data["auth_method"] = "api_key"
            output_data["account_uid"] = get_nested_attribute(
                provider, "identity.organization_id"
            )
            output_data["account_name"] = get_nested_attribute(
                provider, "identity.bearer_email"
            ) or get_nested_attribute(provider, "identity.organization_id")
            output_data["resource_name"] = check_output.resource_name
            output_data["resource_uid"] = check_output.resource_id
            output_data["region"] = check_output.region

        elif provider.type == "alibabacloud":
            output_data["auth_method"] = get_nested_attribute(
                provider, "identity.identity_arn"

@@ -1400,6 +1400,127 @@ class HTML(Output):
            )
            return ""

    @staticmethod
    def get_okta_assessment_summary(provider: Provider) -> str:
        """
        get_okta_assessment_summary gets the HTML assessment summary for the Okta provider

        Args:
            provider (Provider): the Okta provider object

        Returns:
            str: HTML assessment summary for the Okta provider
        """
        try:
            assessment_items = f"""
                        <li class="list-group-item">
                            <b>Okta Domain:</b> {provider.identity.org_domain}
                        </li>"""

            credentials_items = f"""
                        <li class="list-group-item">
                            <b>Authentication:</b> {provider.auth_method}
                        </li>
                        <li class="list-group-item">
                            <b>Client ID:</b> {provider.identity.client_id}
                        </li>"""

            return f"""
                <div class="col-md-2">
                    <div class="card">
                        <div class="card-header">
                            Okta Assessment Summary
                        </div>
                        <ul class="list-group list-group-flush">{assessment_items}
                        </ul>
                    </div>
                </div>
                <div class="col-md-4">
                    <div class="card">
                        <div class="card-header">
                            Okta Credentials
                        </div>
                        <ul class="list-group list-group-flush">{credentials_items}
                        </ul>
                    </div>
                </div>"""
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
            )
            return ""

    @staticmethod
    def get_scaleway_assessment_summary(provider: Provider) -> str:
        """
        get_scaleway_assessment_summary gets the HTML assessment summary for the Scaleway provider

        Args:
            provider (Provider): the Scaleway provider object

        Returns:
            str: HTML assessment summary for the Scaleway provider
        """
        try:
            assessment_items = f"""
                        <li class="list-group-item">
                            <b>Organization ID:</b> {provider.identity.organization_id}
                        </li>"""

            credentials_items = """
                        <li class="list-group-item">
                            <b>Authentication:</b> API Key
                        </li>"""

            access_key = getattr(provider.session, "access_key", None)
            if access_key:
                credentials_items += f"""
                        <li class="list-group-item">
                            <b>Access Key:</b> {access_key}
                        </li>"""

            bearer_type = getattr(provider.identity, "bearer_type", None)
            bearer_email = getattr(provider.identity, "bearer_email", None)
            bearer_id = getattr(provider.identity, "bearer_id", None)
            if bearer_type:
                bearer_label = bearer_email or bearer_id or "-"
                credentials_items += f"""
                        <li class="list-group-item">
                            <b>Bearer:</b> {bearer_type} ({bearer_label})
                        </li>"""

            region = getattr(provider.session, "default_region", None)
            if region:
                credentials_items += f"""
                        <li class="list-group-item">
                            <b>Default Region:</b> {region}
                        </li>"""

            return f"""
                <div class="col-md-2">
                    <div class="card">
                        <div class="card-header">
                            Scaleway Assessment Summary
                        </div>
                        <ul class="list-group list-group-flush">{assessment_items}
                        </ul>
                    </div>
                </div>
                <div class="col-md-4">
                    <div class="card">
                        <div class="card-header">
                            Scaleway Credentials
                        </div>
                        <ul class="list-group list-group-flush">{credentials_items}
                        </ul>
                    </div>
                </div>"""
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
            )
            return ""

    @staticmethod
    def get_assessment_summary(provider: Provider) -> str:
        """

@@ -40,6 +40,10 @@ def stdout_report(finding, color, verbose, status, fix):
        details = finding.location
    if finding.check_metadata.Provider == "vercel":
        details = finding.region
    if finding.check_metadata.Provider == "okta":
        details = finding.region
    if finding.check_metadata.Provider == "scaleway":
        details = finding.region

    if (verbose or fix) and (not status or finding.status in status):
        if finding.muted:

@@ -108,6 +108,12 @@ def display_summary_table(
            )
        else:
            audited_entities = provider.identity.username or "Personal Account"
    elif provider.type == "okta":
        entity_type = "Okta Org"
        audited_entities = provider.identity.org_domain
    elif provider.type == "scaleway":
        entity_type = "Organization"
        audited_entities = provider.identity.organization_id

    # Check if there are findings and that they are not all MANUAL
    if findings and not all(finding.status == "MANUAL" for finding in findings):

@@ -16,6 +16,8 @@ class iam_no_custom_policy_permissive_role_assumption(Check):
        for policy in iam_client.policies.values():
            # Check only custom policies
            if policy.type == "Custom":
                if not policy.attached and not iam_client.provider.scan_unused_services:
                    continue
                report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
                report.region = iam_client.region
                report.status = "PASS"

@@ -11,6 +11,8 @@ class iam_policy_allows_privilege_escalation(Check):

        for policy in iam_client.policies.values():
            if policy.type == "Custom":
                if not policy.attached and not iam_client.provider.scan_unused_services:
                    continue
                report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
                report.region = iam_client.region
                report.status = "PASS"

@@ -11,6 +11,8 @@ class iam_policy_no_full_access_to_cloudtrail(Check):
        for policy in iam_client.policies.values():
            # Check only custom policies
            if policy.type == "Custom":
                if not policy.attached and not iam_client.provider.scan_unused_services:
                    continue
                report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
                report.region = iam_client.region
                report.status = "PASS"

@@ -11,6 +11,8 @@ class iam_policy_no_full_access_to_kms(Check):
        for policy in iam_client.policies.values():
            # Check only custom policies
            if policy.type == "Custom":
                if not policy.attached and not iam_client.provider.scan_unused_services:
                    continue
                report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
                report.region = iam_client.region
                report.status = "PASS"

@@ -10,6 +10,8 @@ class iam_policy_no_wildcard_marketplace_subscribe(Check):
        findings = []
        for policy in iam_client.policies.values():
            if policy.type == "Custom":
                if not policy.attached and not iam_client.provider.scan_unused_services:
                    continue
                report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
                report.region = iam_client.region
                report.status = "PASS"

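All five hunks add the same two-line guard. A self-contained sketch of the gating behavior (the `Policy` type is a stand-in, not Prowler's real model):

    from dataclasses import dataclass

    @dataclass
    class Policy:  # stand-in for the IAM policy model, illustrative only
        type: str
        attached: bool

    def should_evaluate(policy: Policy, scan_unused_services: bool) -> bool:
        """Mirror of the guard added to the five IAM checks."""
        if policy.type != "Custom":
            return False  # only customer-managed policies are evaluated here
        if not policy.attached and not scan_unused_services:
            return False  # unattached policies are skipped by default (#11150)
        return True

    assert not should_evaluate(Policy("Custom", attached=False), scan_unused_services=False)
    assert should_evaluate(Policy("Custom", attached=False), scan_unused_services=True)
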
@@ -403,6 +403,30 @@ class Provider(ABC):
                    mutelist_path=arguments.mutelist_file,
                    fixer_config=fixer_config,
                )
            elif "okta" in provider_class_name.lower():
                provider_class(
                    okta_org_domain=getattr(arguments, "okta_org_domain", ""),
                    okta_client_id=getattr(arguments, "okta_client_id", ""),
                    okta_private_key=getattr(arguments, "okta_private_key", ""),
                    okta_private_key_file=getattr(
                        arguments, "okta_private_key_file", ""
                    ),
                    okta_scopes=getattr(arguments, "okta_scopes", None),
                    config_path=arguments.config_file,
                    mutelist_path=arguments.mutelist_file,
                    fixer_config=fixer_config,
                )
            elif "scaleway" in provider_class_name.lower():
                provider_class(
                    access_key=getattr(arguments, "access_key", None),
                    secret_key=getattr(arguments, "secret_key", None),
                    organization_id=getattr(arguments, "organization_id", None),
                    project_id=getattr(arguments, "project_id", None),
                    region=getattr(arguments, "region", None),
                    config_path=arguments.config_file,
                    mutelist_path=arguments.mutelist_file,
                    fixer_config=fixer_config,
                )

        except TypeError as error:
            logger.critical(

@@ -85,6 +85,15 @@ class entra_break_glass_account_fido2_security_key_registered(Check):
                resource_id=user.id,
            )

            if entra_client.user_registration_details_error:
                report.status = "FAIL"
                report.status_extended = (
                    f"Cannot verify FIDO2 security key registration for break glass account {user.name}: "
                    f"{entra_client.user_registration_details_error}."
                )
                findings.append(report)
                continue

            auth_methods = set(user.authentication_methods)
            has_fido2 = "fido2SecurityKey" in auth_methods
            has_passkey_device_bound = "passKeyDeviceBound" in auth_methods

@@ -3,7 +3,7 @@ import json
from asyncio import gather
from datetime import datetime, timezone
from enum import Enum
from typing import Dict, List, Optional
from typing import Any, Dict, List, Optional, Tuple
from uuid import UUID

from kiota_abstractions.base_request_configuration import RequestConfiguration
@@ -76,6 +76,7 @@ class Entra(M365Service):

        self.tenant_domain = provider.identity.tenant_domain
        self.tenant_id = getattr(provider.identity, "tenant_id", None)
        self.user_registration_details_error: Optional[str] = None
        attributes = loop.run_until_complete(
            gather(
                self._get_authorization_policy(),
@@ -854,7 +855,9 @@ class Entra(M365Service):
        for member in members:
            user_roles_map.setdefault(member.id, []).append(role_template_id)

        registration_details = await self._get_user_registration_details()
        registration_details, self.user_registration_details_error = (
            await self._get_user_registration_details()
        )

        while users_response:
            for user in getattr(users_response, "value", []) or []:
@@ -897,18 +900,24 @@ class Entra(M365Service):
            )
        return users

    async def _get_user_registration_details(self):
    async def _get_user_registration_details(
        self,
    ) -> Tuple[Dict[str, Dict[str, Any]], Optional[str]]:
        """Retrieve user authentication method registration details.

        Fetches registration details from the Microsoft Graph API, including
        MFA capability and the specific authentication methods each user has registered.

        Returns:
            dict: A dictionary mapping user IDs to their registration details,
                where each value is a dict with 'is_mfa_capable' (bool) and
                'authentication_methods' (list of str).
            A tuple containing:
                - A dictionary mapping user IDs to their registration details,
                  where each value is a dict with 'is_mfa_capable' (bool) and
                  'authentication_methods' (list of str), or an empty dict if
                  retrieval fails.
                - An error message string if there was an access error, None otherwise.
        """
        registration_details = {}
        error_message = None
        try:
            registration_builder = (
                self.client.reports.authentication_methods.user_registration_details
@@ -933,16 +942,25 @@ class Entra(M365Service):
                    next_link
                ).get()

        except Exception as error:
            if (
                error.__class__.__name__ == "ODataError"
                and error.__dict__.get("response_status_code", None) == 403
            ):
        except ODataError as error:
            error_code = getattr(error.error, "code", None) if error.error else None
            if error_code == "Authorization_RequestDenied":
                error_message = "Insufficient privileges to read user registration details. Required permission: AuditLog.Read.All"
                logger.error(
                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error_message}"
                )
            else:
                logger.error(
                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                )
                error_message = str(error)
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            error_message = f"Failed to retrieve user registration details: {error}"

        return registration_details
        return registration_details, error_message

    async def _get_oauth_apps(self) -> Optional[Dict[str, "OAuthApp"]]:
        """

@@ -13,6 +13,10 @@ class entra_users_mfa_capable(Check):
    ("Ensure all member users are 'MFA capable'").

    Guest users and disabled accounts are excluded from the evaluation.

    - PASS: The member user is MFA capable.
    - FAIL: The member user is not MFA capable, or MFA capability cannot be
      verified due to insufficient permissions to read user registration details.
    """

    def execute(self) -> List[CheckReportM365]:
@@ -42,7 +46,13 @@
                resource_id=user.id,
            )

            if not user.is_mfa_capable:
            if entra_client.user_registration_details_error:
                report.status = "FAIL"
                report.status_extended = (
                    f"Cannot verify MFA capability for user {user.name}: "
                    f"{entra_client.user_registration_details_error}."
                )
            elif not user.is_mfa_capable:
                report.status = "FAIL"
                report.status_extended = f"User {user.name} is not MFA capable."
            else:

@@ -0,0 +1,112 @@
from prowler.exceptions.exceptions import ProwlerException


# Exception codes from 14000 to 14999 are reserved for Okta exceptions
class OktaBaseException(ProwlerException):
    """Base class for Okta Errors."""

    OKTA_ERROR_CODES = {
        (14000, "OktaEnvironmentVariableError"): {
            "message": "Okta environment variable error",
            "remediation": "Check the Okta environment variables and ensure they are properly set.",
        },
        (14001, "OktaSetUpSessionError"): {
            "message": "Error setting up Okta session",
            "remediation": "Check the OAuth credentials (org URL, client ID, private key, scopes) and ensure they are properly configured.",
        },
        (14002, "OktaSetUpIdentityError"): {
            "message": "Okta identity setup error due to bad credentials",
            "remediation": "Check the OAuth credentials and confirm the service app has been granted the required read scopes.",
        },
        (14003, "OktaInvalidCredentialsError"): {
            "message": "Okta credentials are not valid",
            "remediation": "Check the client ID and private key for the Okta service app.",
        },
        (14004, "OktaInvalidOrgDomainError"): {
            "message": "Okta organization domain is not valid",
            "remediation": "Provide an Okta-managed domain such as <org>.okta.com (or .oktapreview.com / .okta-emea.com / .okta-gov.com / .okta.mil / .okta-miltest.com / .trex-govcloud.com), with no scheme and no trailing slash.",
        },
        (14005, "OktaPrivateKeyFileError"): {
            "message": "Okta private key file could not be read",
            "remediation": "Check the file path and permissions, and ensure the file contains a PEM-encoded RSA key or a JWK JSON document.",
        },
        (14006, "OktaInsufficientPermissionsError"): {
            "message": "Okta service app is missing required scopes",
            "remediation": "Have a Super Admin grant the required *.read scopes to the service app and assign the Read-Only Administrator role.",
        },
    }

    def __init__(self, code, file=None, original_exception=None, message=None):
        provider = "Okta"
        error_info = self.OKTA_ERROR_CODES.get((code, self.__class__.__name__))
        if error_info is None:
            error_info = {
                "message": message or "Unknown Okta error.",
                "remediation": "Check the Okta API documentation for more details.",
            }
        elif message:
            error_info = error_info.copy()
            error_info["message"] = message
        super().__init__(
            code=code,
            source=provider,
            file=file,
            original_exception=original_exception,
            error_info=error_info,
        )


class OktaCredentialsError(OktaBaseException):
    """Base class for Okta credentials errors."""

    def __init__(self, code, file=None, original_exception=None, message=None):
        super().__init__(code, file, original_exception, message)


class OktaEnvironmentVariableError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14000, file=file, original_exception=original_exception, message=message
        )


class OktaSetUpSessionError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14001, file=file, original_exception=original_exception, message=message
        )


class OktaSetUpIdentityError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14002, file=file, original_exception=original_exception, message=message
        )


class OktaInvalidCredentialsError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14003, file=file, original_exception=original_exception, message=message
        )


class OktaInvalidOrgDomainError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14004, file=file, original_exception=original_exception, message=message
        )


class OktaPrivateKeyFileError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14005, file=file, original_exception=original_exception, message=message
        )


class OktaInsufficientPermissionsError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14006, file=file, original_exception=original_exception, message=message
        )
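A hedged sketch of the calling convention, assuming `ProwlerException` renders the code, message, and remediation when stringified (base-class behavior that is not shown in this diff):

    # Illustrative only: exercise the domain-validation error path.
    try:
        raise OktaInvalidOrgDomainError(
            file="okta_provider.py",
            message="Invalid Okta org domain: 'acme.example.com'.",
        )
    except OktaCredentialsError as error:
        print(error)  # formatted by the ProwlerException base class
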
@@ -0,0 +1,43 @@
def init_parser(self):
    """Init the Okta Provider CLI parser.

    The Okta provider authenticates with OAuth 2.0 (private-key JWT). The
    private key is intentionally not exposed as a CLI flag — secrets must
    be supplied via the `OKTA_PRIVATE_KEY` or `OKTA_PRIVATE_KEY_FILE`
    environment variable. Non-secret values (org URL, client ID, scopes)
    are flag-configurable.
    """
    okta_parser = self.subparsers.add_parser(
        "okta", parents=[self.common_providers_parser], help="Okta Provider"
    )
    okta_auth_subparser = okta_parser.add_argument_group("Authentication")
    okta_auth_subparser.add_argument(
        "--okta-org-domain",
        nargs="?",
        help=(
            "Okta organization domain (e.g. acme.okta.com). Must be an "
            "Okta-managed domain (.okta.com / .oktapreview.com / "
            ".okta-emea.com / .okta-gov.com / .okta.mil / "
            ".okta-miltest.com / .trex-govcloud.com), without scheme or path."
        ),
        default=None,
        metavar="OKTA_ORG_DOMAIN",
    )
    okta_auth_subparser.add_argument(
        "--okta-client-id",
        nargs="?",
        help="Okta service app Client ID for OAuth 2.0 (private-key JWT)",
        default=None,
        metavar="OKTA_CLIENT_ID",
    )
    okta_auth_subparser.add_argument(
        "--okta-scopes",
        nargs="+",
        help=(
            "OAuth scopes to request, space-separated "
            "(e.g. okta.policies.read okta.users.read). Defaults to the "
            "read scopes required by the bundled checks."
        ),
        default=None,
        metavar="OKTA_SCOPES",
    )
@@ -0,0 +1,14 @@
from prowler.lib.check.models import CheckReportOkta
from prowler.lib.mutelist.mutelist import Mutelist
from prowler.lib.outputs.utils import unroll_dict, unroll_tags


class OktaMutelist(Mutelist):
    def is_finding_muted(self, finding: CheckReportOkta, org_domain: str) -> bool:
        return self.is_muted(
            org_domain,
            finding.check_metadata.CheckID,
            "*",
            finding.resource_name,
            unroll_dict(unroll_tags(finding.resource_tags)),
        )
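Read alongside the example mutelist fixture earlier in the diff, the positional arguments line up as follows; `okta_mutelist` is an already-constructed `OktaMutelist` and the values are invented for the example:

    muted = okta_mutelist.is_muted(
        "acme.okta.com",                             # account == org domain
        "signon_global_session_idle_timeout_15min",  # check id
        "*",                                         # region is always "*"
        "Default Policy",                            # resource == policy name
        "",                                          # unrolled tags (none)
    )
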
@@ -0,0 +1,34 @@
import asyncio
from typing import TYPE_CHECKING

from okta.client import Client as OktaSDKClient

from prowler.providers.okta.models import OktaSession

if TYPE_CHECKING:
    from prowler.providers.okta.okta_provider import OktaProvider


class OktaService:
    """Base class for Okta service implementations.

    Wraps the async okta-sdk-python `Client` so that subclasses can stay
    synchronous like the other Prowler providers. The SDK auto-refreshes
    the OAuth access token; nothing to manage here.
    """

    def __init__(self, service: str, provider: "OktaProvider"):
        self.provider = provider
        self.service = service
        self.client = self.__set_client__(provider.session)
        self.audit_config = provider.audit_config
        self.fixer_config = provider.fixer_config

    @staticmethod
    def __set_client__(session: OktaSession) -> OktaSDKClient:
        return OktaSDKClient(session.to_sdk_config())

    @staticmethod
    def _run(coro):
        """Run an okta-sdk-python coroutine from synchronous code."""
        return asyncio.run(coro)
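A hedged sketch of a concrete service on top of this base class; the `SignOn` class is invented for illustration, and the `list_policies` call mirrors the credential probe later in this diff:

    class SignOn(OktaService):
        """Illustrative subclass, not part of the diff."""

        def __init__(self, provider: "OktaProvider"):
            super().__init__(__class__.__name__, provider)
            self.policies = self._list_policies()

        def _list_policies(self):
            # okta-sdk-python returns (items, response, error); error is last.
            result = self._run(self.client.list_policies(type="OKTA_SIGN_ON"))
            items, err = result[0], result[-1]
            return items if err is None else []
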
@@ -0,0 +1,48 @@
from pydantic import BaseModel

from prowler.config.config import output_file_timestamp
from prowler.providers.common.models import ProviderOutputOptions


class OktaSession(BaseModel):
    org_domain: str
    client_id: str
    scopes: list[str]
    private_key: str

    def to_sdk_config(self) -> dict:
        # Shared by the credential probe (OktaProvider.setup_identity) and
        # the service-level client (OktaService.__set_client__). Keeping the
        # builder in one place stops the two SDK config dicts from drifting.
        # The Okta SDK expects a fully-qualified `orgUrl`; we build it from
        # the validated domain so user input stays scheme-free.
        # DPoP proofs are sent on every token request — required by tenants
        # with "Demonstrating Proof of Possession" enabled on the service
        # app (or org-wide), harmless on tenants that don't.
        return {
            "orgUrl": f"https://{self.org_domain}",
            "authorizationMode": "PrivateKey",
            "clientId": self.client_id,
            "scopes": self.scopes,
            "privateKey": self.private_key,
            "dpopEnabled": True,
        }


class OktaIdentityInfo(BaseModel):
    org_domain: str
    client_id: str


class OktaOutputOptions(ProviderOutputOptions):
    def __init__(self, arguments, bulk_checks_metadata, identity):
        super().__init__(arguments, bulk_checks_metadata)
        if (
            not hasattr(arguments, "output_filename")
            or arguments.output_filename is None
        ):
            self.output_filename = (
                f"prowler-output-{identity.org_domain}-{output_file_timestamp}"
            )
        else:
            self.output_filename = arguments.output_filename
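For concreteness, the config dict a session produces (values invented, shape taken from `to_sdk_config` above):

    session = OktaSession(
        org_domain="acme.okta.com",            # invented example values
        client_id="0oa1a2b3c4d5e6f7g8h9",
        scopes=["okta.policies.read"],
        private_key="-----BEGIN RSA PRIVATE KEY-----\n...",
    )
    config = session.to_sdk_config()
    assert config["orgUrl"] == "https://acme.okta.com"
    assert config["authorizationMode"] == "PrivateKey"
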
@@ -0,0 +1,375 @@
import asyncio
import os
import re
from os import environ
from typing import Optional, Union

from colorama import Fore, Style
from okta.client import Client as OktaSDKClient

from prowler.config.config import (
    default_config_file_path,
    get_default_mute_file_path,
    load_and_validate_config_file,
)
from prowler.lib.logger import logger
from prowler.lib.mutelist.mutelist import Mutelist
from prowler.lib.utils.utils import print_boxes
from prowler.providers.common.models import Audit_Metadata, Connection
from prowler.providers.common.provider import Provider
from prowler.providers.okta.exceptions.exceptions import (
    OktaEnvironmentVariableError,
    OktaInsufficientPermissionsError,
    OktaInvalidCredentialsError,
    OktaInvalidOrgDomainError,
    OktaPrivateKeyFileError,
    OktaSetUpIdentityError,
    OktaSetUpSessionError,
)
from prowler.providers.okta.lib.mutelist.mutelist import OktaMutelist
from prowler.providers.okta.models import OktaIdentityInfo, OktaSession

DEFAULT_SCOPES = ["okta.policies.read"]
# Accept only Okta-managed domains. Custom (vanity) domains are rejected on
# purpose — they're a recurring source of typos and silent misconfig and
# Prowler's audience overwhelmingly uses Okta-managed hosts. The TLDs below
# match the set the Okta SDK whitelists in `okta.config.config_validator`,
# which includes the commercial, preview, EMEA and US gov/mil environments.
# If a customer with a custom domain shows up, lift this guard behind an
# explicit opt-in.
ORG_DOMAIN_RE = re.compile(
    r"^[a-z0-9][a-z0-9-]*\.("
    r"okta\.com|oktapreview\.com|okta-emea\.com|"
    r"okta-gov\.com|okta\.mil|okta-miltest\.com|trex-govcloud\.com"
    r")$"
)


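A quick sanity check of what the guard accepts and rejects, using invented domains:

    assert ORG_DOMAIN_RE.match("acme.okta.com")              # managed: accepted
    assert ORG_DOMAIN_RE.match("dev-123.oktapreview.com")    # preview: accepted
    assert not ORG_DOMAIN_RE.match("login.acme.com")         # vanity: rejected
    assert not ORG_DOMAIN_RE.match("https://acme.okta.com")  # scheme: rejected
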
class OktaProvider(Provider):
    """Okta Provider class.

    Authenticates against an Okta organization using OAuth 2.0 with a
    private-key JWT (Client Credentials grant). The SDK requests and
    refreshes the access token internally.

    Attributes:
        _type (str): The type of the provider.
        _auth_method (str): The authentication method used by the provider.
        _session (OktaSession): The session object for the provider.
        _identity (OktaIdentityInfo): The identity information for the provider.
        _audit_config (dict): The audit configuration for the provider.
        _fixer_config (dict): The fixer configuration for the provider.
        _mutelist (Mutelist): The mutelist for the provider.
        audit_metadata (Audit_Metadata): The audit metadata for the provider.
    """

    _type: str = "okta"
    _auth_method: str = None
    _session: OktaSession
    _identity: OktaIdentityInfo
    _audit_config: dict
    _fixer_config: dict
    _mutelist: Mutelist
    audit_metadata: Audit_Metadata

    def __init__(
        self,
        okta_org_domain: str = "",
        okta_client_id: str = "",
        okta_private_key: str = "",
        okta_private_key_file: str = "",
        okta_scopes: Optional[Union[str, list[str]]] = None,
        config_path: str = None,
        config_content: dict = None,
        fixer_config: dict = {},
        mutelist_path: str = None,
        mutelist_content: dict = None,
    ):
        """Okta Provider constructor."""
        logger.info("Instantiating Okta Provider...")

        OktaProvider.validate_arguments(
            okta_org_domain=okta_org_domain,
            okta_client_id=okta_client_id,
            okta_private_key=okta_private_key,
            okta_private_key_file=okta_private_key_file,
        )
        self._session = OktaProvider.setup_session(
            org_domain=okta_org_domain,
            client_id=okta_client_id,
            private_key=okta_private_key,
            private_key_file=okta_private_key_file,
            scopes=okta_scopes,
        )
        self._identity = OktaProvider.setup_identity(self._session)
        self._auth_method = "OAuth 2.0 (private-key JWT)"

        if config_content:
            self._audit_config = config_content
        else:
            if not config_path:
                config_path = default_config_file_path
            self._audit_config = load_and_validate_config_file(self._type, config_path)
        self._fixer_config = fixer_config

        if mutelist_content:
            self._mutelist = OktaMutelist(mutelist_content=mutelist_content)
        else:
            if not mutelist_path:
                mutelist_path = get_default_mute_file_path(self.type)
            self._mutelist = OktaMutelist(mutelist_path=mutelist_path)

        Provider.set_global_provider(self)

    @property
    def auth_method(self):
        return self._auth_method

    @property
    def session(self):
        return self._session

    @property
    def identity(self):
        return self._identity

    @property
    def type(self):
        return self._type

    @property
    def audit_config(self):
        return self._audit_config

    @property
    def fixer_config(self):
        return self._fixer_config

    @property
    def mutelist(self) -> OktaMutelist:
        return self._mutelist

    @staticmethod
    def validate_arguments(
        okta_org_domain: str = "",
        okta_client_id: str = "",
        okta_private_key: str = "",
        okta_private_key_file: str = "",
    ):
        """Validate that all required OAuth credentials are provided.

        Falls back to the matching `OKTA_*` environment variables when a CLI
        argument is not supplied. The private key may be supplied as raw
        content (preferred for API/UI integrations) or as a file path.
        Raises a single combined error if any required value is missing.
        """
        org_domain = okta_org_domain or environ.get("OKTA_ORG_DOMAIN", "")
        client_id = okta_client_id or environ.get("OKTA_CLIENT_ID", "")
        private_key = okta_private_key or environ.get("OKTA_PRIVATE_KEY", "")
        private_key_file = okta_private_key_file or environ.get(
            "OKTA_PRIVATE_KEY_FILE", ""
        )

        missing = []
        if not org_domain:
            missing.append("--okta-org-domain / OKTA_ORG_DOMAIN")
        if not client_id:
            missing.append("--okta-client-id / OKTA_CLIENT_ID")
        if not private_key and not private_key_file:
            missing.append("OKTA_PRIVATE_KEY (or OKTA_PRIVATE_KEY_FILE)")
        if missing:
            raise OktaEnvironmentVariableError(
                file=os.path.basename(__file__),
                message=(
                    "Okta provider requires all OAuth credentials. Missing: "
                    + ", ".join(missing)
                ),
            )

@staticmethod
|
||||
def setup_session(
|
||||
org_domain: str = "",
|
||||
client_id: str = "",
|
||||
private_key: str = "",
|
||||
private_key_file: str = "",
|
||||
scopes: Optional[Union[str, list[str]]] = None,
|
||||
) -> OktaSession:
|
||||
"""Build an OktaSession from CLI args, falling back to environment variables.
|
||||
|
||||
Accepts the private key as raw content (`private_key` /
|
||||
`OKTA_PRIVATE_KEY`) or as a file path (`private_key_file` /
|
||||
`OKTA_PRIVATE_KEY_FILE`). Content takes precedence when both are
|
||||
supplied — this matches the GitHub provider pattern and keeps the
|
||||
API/UI integrations from having to write keys to disk.
|
||||
"""
|
||||
try:
|
||||
org_domain = org_domain or environ.get("OKTA_ORG_DOMAIN", "")
|
||||
client_id = client_id or environ.get("OKTA_CLIENT_ID", "")
|
||||
private_key = private_key or environ.get("OKTA_PRIVATE_KEY", "")
|
||||
private_key_file = private_key_file or environ.get(
|
||||
"OKTA_PRIVATE_KEY_FILE", ""
|
||||
)
|
||||
if not scopes:
|
||||
scopes = environ.get("OKTA_SCOPES", "")
|
||||
|
||||
org_domain = org_domain.strip().lower()
|
||||
if not ORG_DOMAIN_RE.match(org_domain):
|
||||
raise OktaInvalidOrgDomainError(
|
||||
file=os.path.basename(__file__),
|
||||
message=(
|
||||
f"Invalid Okta org domain: '{org_domain}'. Expected "
|
||||
"an Okta-managed domain such as <org>.okta.com "
|
||||
"(or .oktapreview.com / .okta-emea.com / "
|
||||
".okta-gov.com / .okta.mil / .okta-miltest.com / "
|
||||
".trex-govcloud.com), with no scheme and no path."
|
||||
),
|
||||
)
|
||||
|
||||
if private_key:
|
||||
private_key = private_key.strip()
|
||||
else:
|
||||
try:
|
||||
with open(private_key_file, "r") as fh:
|
||||
private_key = fh.read().strip()
|
||||
except OSError as error:
|
||||
raise OktaPrivateKeyFileError(
|
||||
file=os.path.basename(__file__),
|
||||
original_exception=error,
|
||||
message=f"Could not read private key file '{private_key_file}': {error}",
|
||||
)
|
||||
if not private_key:
|
||||
raise OktaPrivateKeyFileError(
|
||||
file=os.path.basename(__file__),
|
||||
message=(
|
||||
f"Private key file '{private_key_file}' is empty."
|
||||
if private_key_file
|
||||
else "Private key content is empty."
|
||||
),
|
||||
)
|
||||
|
||||
# Accept either a CSV string (from env var / legacy callers) or
|
||||
# a list[str] (from programmatic callers and the CLI's nargs="+").
|
||||
# List elements may themselves contain commas (e.g. "a,b") and
|
||||
# are flattened to support mixed input.
|
||||
if isinstance(scopes, str):
|
||||
raw_items = scopes.split(",")
|
||||
elif isinstance(scopes, list):
|
||||
raw_items = [item for s in scopes for item in str(s).split(",")]
|
||||
else:
|
||||
raw_items = []
|
||||
scope_list = [s.strip() for s in raw_items if s and s.strip()]
|
||||
if not scope_list:
|
||||
scope_list = list(DEFAULT_SCOPES)
|
||||
|
||||
return OktaSession(
|
||||
org_domain=org_domain,
|
||||
client_id=client_id,
|
||||
scopes=scope_list,
|
||||
private_key=private_key,
|
||||
)
|
||||
|
||||
except (OktaInvalidOrgDomainError, OktaPrivateKeyFileError):
|
||||
raise
|
||||
except Exception as error:
|
||||
logger.critical(
|
||||
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
)
|
||||
raise OktaSetUpSessionError(original_exception=error)
|
||||
|
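The comment above promises that CSV strings, scope lists, and mixed input all normalize the same way. A minimal standalone sketch of that flattening step (the function name is illustrative; `setup_session` additionally falls back to `DEFAULT_SCOPES` when the result is empty):

from typing import Optional, Union


def flatten_scopes(scopes: Optional[Union[str, list[str]]]) -> list[str]:
    # Mirrors setup_session: split CSV strings, flatten list elements that
    # themselves contain commas, strip whitespace, and drop empty items.
    if isinstance(scopes, str):
        raw_items = scopes.split(",")
    elif isinstance(scopes, list):
        raw_items = [item for s in scopes for item in str(s).split(",")]
    else:
        raw_items = []
    return [s.strip() for s in raw_items if s and s.strip()]


assert flatten_scopes("okta.policies.read, okta.apps.read") == [
    "okta.policies.read",
    "okta.apps.read",
]
assert flatten_scopes(["okta.policies.read,okta.apps.read", " okta.users.read "]) == [
    "okta.policies.read",
    "okta.apps.read",
    "okta.users.read",
]
assert flatten_scopes(None) == []  # setup_session would substitute DEFAULT_SCOPES
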
    @staticmethod
    def setup_identity(session: OktaSession) -> OktaIdentityInfo:
        """Synthesize identity from the session and verify credentials.

        Service apps don't represent a human user, so the identity is the
        org URL plus the service-app client ID. We still hit the cheapest
        scope-covered endpoint (`list_policies` with limit=1) to fail loudly
        when credentials, scopes, or the granted admin role are wrong.
        """

        async def _probe():
            client = OktaSDKClient(session.to_sdk_config())
            return await client.list_policies(type="OKTA_SIGN_ON", limit="1")

        try:
            result = asyncio.run(_probe())
            # SDK returns (items, resp, err) on the normal path and (items, err)
            # only on early request-creation errors. The error is always last.
            err = result[-1]
            if err is not None:
                err_text = str(err).lower()
                # Distinguish scope/role failures from generic credential
                # failures — different remediation paths in the docs.
                permission_signals = (
                    "invalid_scope",
                    "forbidden",
                    "not authorized",
                    "permission",
                )
                if any(signal in err_text for signal in permission_signals):
                    raise OktaInsufficientPermissionsError(
                        file=os.path.basename(__file__),
                        message=(
                            "Okta rejected the credential probe with a "
                            f"permission-related error: {err}"
                        ),
                    )
                raise OktaInvalidCredentialsError(
                    file=os.path.basename(__file__),
                    message=f"Failed to authenticate against Okta: {err}",
                )
            return OktaIdentityInfo(
                org_domain=session.org_domain,
                client_id=session.client_id,
            )
        except (OktaInvalidCredentialsError, OktaInsufficientPermissionsError):
            raise
        except Exception as error:
            logger.critical(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            raise OktaSetUpIdentityError(original_exception=error)

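Indexing `result[-1]` is what lets a single code path absorb both SDK return shapes described in the comment above. A toy sketch, with stand-in tuples rather than real SDK responses:

# Normal path: (items, resp, err); early request-creation failure: (items, err).
ok_result = (["policy-1"], object(), None)     # 3-tuple, no error
early_err = (None, ValueError("bad request"))  # 2-tuple, error present

for result in (ok_result, early_err):
    err = result[-1]  # the error is always the last element in both shapes
    if err is not None:
        print(f"probe failed: {err}")
    else:
        print(f"probe returned {len(result[0])} item(s)")
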
    def print_credentials(self):
        report_lines = [
            f"Okta Domain: {Fore.YELLOW}{self.identity.org_domain}{Style.RESET_ALL}",
            f"Okta Client ID: {Fore.YELLOW}{self.identity.client_id}{Style.RESET_ALL}",
            f"Authentication Method: {Fore.YELLOW}{self.auth_method}{Style.RESET_ALL}",
        ]
        report_title = (
            f"{Style.BRIGHT}Using the Okta credentials below:{Style.RESET_ALL}"
        )
        print_boxes(report_lines, report_title)

    @staticmethod
    def test_connection(
        okta_org_domain: str = "",
        okta_client_id: str = "",
        okta_private_key: str = "",
        okta_private_key_file: str = "",
        okta_scopes: Optional[Union[str, list[str]]] = None,
        raise_on_exception: bool = True,
    ) -> Connection:
        """Test the connection to Okta with the provided OAuth credentials."""
        try:
            OktaProvider.validate_arguments(
                okta_org_domain=okta_org_domain,
                okta_client_id=okta_client_id,
                okta_private_key=okta_private_key,
                okta_private_key_file=okta_private_key_file,
            )
            session = OktaProvider.setup_session(
                org_domain=okta_org_domain,
                client_id=okta_client_id,
                private_key=okta_private_key,
                private_key_file=okta_private_key_file,
                scopes=okta_scopes,
            )
            OktaProvider.setup_identity(session)
            return Connection(is_connected=True)
        except Exception as error:
            logger.critical(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            if raise_on_exception:
                raise error
            return Connection(error=error)
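
A sketch of how a caller might drive `test_connection` without letting exceptions propagate. The import path and all credential values are assumptions for illustration, and this assumes `validate_arguments` accepts a key file in place of raw key content, as `setup_session` does:

from prowler.providers.okta.okta_provider import OktaProvider  # path assumed

connection = OktaProvider.test_connection(
    okta_org_domain="example.okta.com",          # placeholder
    okta_client_id="0oa1234567890abcdef",        # placeholder
    okta_private_key_file="/path/to/key.pem",    # placeholder
    raise_on_exception=False,                    # collect the error instead
)
if connection.is_connected:
    print("Okta credentials verified")
else:
    print(f"Connection failed: {connection.error}")
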
@@ -0,0 +1,4 @@
from prowler.providers.common.provider import Provider
from prowler.providers.okta.services.signon.signon_service import Signon

signon_client = Signon(Provider.get_global_provider())
@@ -0,0 +1,37 @@
{
  "Provider": "okta",
  "CheckID": "signon_global_session_idle_timeout_15min",
  "CheckTitle": "Default Global Session Policy has a Priority 1 non-default rule enforcing 15-minute idle timeout",
  "CheckType": [],
  "ServiceName": "signon",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "medium",
  "ResourceType": "NotDefined",
  "ResourceGroup": "governance",
  "Description": "The **Default Global Session Policy** must have a **Priority 1** rule that is **not** the built-in `Default Rule`, and that rule must set **Maximum Okta global session idle time** to 15 minutes or less. The threshold defaults to 15 minutes and is overridable via the `okta_max_session_idle_minutes` key in the audit config.",
  "Risk": "Without a 15-minute idle timeout, an unattended workstation leaves an authenticated Okta session open indefinitely, allowing an attacker with physical or remote access to take over the user's identity and pivot into every downstream application that trusts Okta SSO.",
  "RelatedUrl": "",
  "AdditionalURLs": [
    "https://help.okta.com/oie/en-us/content/topics/identity-engine/policies/about-okta-sign-on-policies.htm",
    "https://developer.okta.com/docs/api/openapi/okta-management/management/tag/Policy/"
  ],
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "1. Sign in to the Okta Admin Console as a Super Admin\n2. Go to Security > Global Session Policy\n3. Open the Default Policy\n4. Add or edit a non-default rule\n5. Move that rule to Priority 1 so it is evaluated before the built-in Default Rule\n6. Set 'Maximum Okta global session idle time' to 15 minutes or less\n7. Save the rule",
      "Terraform": "resource \"okta_policy_rule_signon\" \"prowler_idle_timeout_15min\" {\n  policy_id          = okta_policy_signon.default.id\n  name               = \"Prowler-enforced idle timeout\"\n  status             = \"ACTIVE\"\n  session_idle       = 15\n  session_persistent = false\n}\n"
    },
    "Recommendation": {
      "Text": "Configure the Default Global Session Policy so its Priority 1 non-default rule sets the Maximum Okta global session idle time to 15 minutes or less.",
      "Url": "https://hub.prowler.com/check/signon_global_session_idle_timeout_15min"
    }
  },
  "Categories": [
    "identity-access"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,126 @@
from prowler.lib.check.models import Check, CheckReportOkta
from prowler.providers.okta.services.signon.signon_client import signon_client
from prowler.providers.okta.services.signon.signon_service import GlobalSessionPolicy

DEFAULT_THRESHOLD_MINUTES = 15


class signon_global_session_idle_timeout_15min(Check):
    """STIG V-273186 / OKTA-APP-000020.

    The DISA STIG requires the Okta Default Policy to have an active
    Priority 1 rule that is not the built-in Default Rule, and that
    rule must set the maximum Okta global session idle time to the
    configured threshold or lower (defaults to 15 minutes per STIG;
    override via `okta_max_session_idle_minutes` in the audit config).
    """

    def execute(self) -> list[CheckReportOkta]:
        audit_config = signon_client.audit_config or {}
        threshold = audit_config.get(
            "okta_max_session_idle_minutes", DEFAULT_THRESHOLD_MINUTES
        )
        org_domain = signon_client.provider.identity.org_domain
        policy = self._get_default_policy()
        report = CheckReportOkta(
            metadata=self.metadata(), resource=policy, org_domain=org_domain
        )

        if policy.id == "default-policy-missing":
            report.status = "FAIL"
            report.status_extended = (
                "Default Global Session Policy was not found. STIG V-273186 "
                "requires the Default Policy to contain an active Priority 1 "
                f"non-default rule with idle timeout <= {threshold} minutes."
            )
            return [report]

        if policy.status and policy.status.upper() != "ACTIVE":
            report.status = "FAIL"
            report.status_extended = (
                f"Default Global Session Policy '{policy.name}' is in "
                f"status '{policy.status}'. STIG V-273186 requires an active "
                "Default Policy with an active Priority 1 non-default rule."
            )
            return [report]

        active_rules = sorted(
            [
                rule
                for rule in policy.rules
                if not rule.status or rule.status.upper() == "ACTIVE"
            ],
            key=lambda rule: (
                rule.priority if rule.priority is not None else float("inf"),
                rule.name,
            ),
        )
        if not active_rules:
            report.status = "FAIL"
            report.status_extended = (
                f"Default Global Session Policy '{policy.name}' has no active "
                "rules. STIG V-273186 requires an active Priority 1 non-default "
                f"rule with idle timeout <= {threshold} minutes."
            )
            return [report]

        priority_one_rule = active_rules[0]
        if priority_one_rule.priority != 1:
            report.status = "FAIL"
            report.status_extended = (
                f"Default Global Session Policy '{policy.name}' has no active "
                f"Priority 1 rule. The first active rule is '{priority_one_rule.name}' "
                f"at priority {priority_one_rule.priority}."
            )
            return [report]

        if priority_one_rule.is_default or priority_one_rule.name == "Default Rule":
            report.status = "FAIL"
            report.status_extended = (
                f"Default Global Session Policy '{policy.name}' uses "
                f"'{priority_one_rule.name}' as its active Priority 1 rule. "
                "The STIG requires a non-default Priority 1 rule."
            )
            return [report]

        idle_timeout = priority_one_rule.max_session_idle_minutes
        if idle_timeout is None:
            report.status = "FAIL"
            report.status_extended = (
                f"Priority 1 non-default rule '{priority_one_rule.name}' in "
                f"Default Global Session Policy '{policy.name}' does not define "
                "a maximum Okta global session idle time."
            )
            return [report]

        if idle_timeout <= threshold:
            report.status = "PASS"
            report.status_extended = (
                f"Priority 1 non-default rule '{priority_one_rule.name}' in "
                f"Default Global Session Policy '{policy.name}' sets the "
                f"maximum Okta global session idle time to {idle_timeout} "
                f"minutes, meeting the configured threshold of {threshold} minutes."
            )
        else:
            report.status = "FAIL"
            report.status_extended = (
                f"Priority 1 non-default rule '{priority_one_rule.name}' in "
                f"Default Global Session Policy '{policy.name}' sets the "
                f"maximum Okta global session idle time to {idle_timeout} "
                f"minutes, exceeding the configured threshold of {threshold} minutes."
            )
        return [report]

    @staticmethod
    def _get_default_policy() -> GlobalSessionPolicy:
        for policy in signon_client.global_session_policies.values():
            if policy.is_default or policy.name == "Default Policy":
                return policy
        return GlobalSessionPolicy(
            id="default-policy-missing",
            name="Default Policy",
            priority=1,
            status="MISSING",
            is_default=True,
            rules=[],
        )
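
Because the verdict hinges on which rule sorts first, here is a small sketch of the same selection logic on synthetic rules, built with the pydantic models from the signon service module shown below (rule names and ids are made up):

from prowler.providers.okta.services.signon.signon_service import (
    GlobalSessionPolicyRule,
)

rules = [
    GlobalSessionPolicyRule(
        id="r2", name="Default Rule", priority=2, status="ACTIVE", is_default=True
    ),
    GlobalSessionPolicyRule(
        id="r1",
        name="STIG idle timeout",
        priority=1,
        status="ACTIVE",
        max_session_idle_minutes=15,
    ),
    GlobalSessionPolicyRule(id="r3", name="Draft rule", status="INACTIVE"),
]

# Same filter and sort key as execute(): drop inactive rules, then order by
# priority with missing priorities last, tie-breaking on name.
active = sorted(
    [r for r in rules if not r.status or r.status.upper() == "ACTIVE"],
    key=lambda r: (r.priority if r.priority is not None else float("inf"), r.name),
)
top = active[0]
assert top.name == "STIG idle timeout" and top.priority == 1 and not top.is_default
assert top.max_session_idle_minutes <= 15  # would be a PASS at the default threshold
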
@@ -0,0 +1,178 @@
from typing import Optional
from urllib.parse import parse_qs, urlparse

from pydantic import BaseModel

from prowler.lib.logger import logger
from prowler.providers.okta.lib.service.service import OktaService


def _next_after_cursor(resp) -> Optional[str]:
    """Extract the `after` cursor from a `Link: ...; rel="next"` header.

    Returns None when there is no next page. Header format follows RFC 5988
    and Okta's pagination guide.
    """
    if resp is None:
        return None
    headers = getattr(resp, "headers", None) or {}
    link = headers.get("link") or headers.get("Link") or ""
    if not link:
        return None
    for part in link.split(","):
        if 'rel="next"' not in part:
            continue
        url_segment = part.split(";", 1)[0].strip().lstrip("<").rstrip(">")
        cursor = parse_qs(urlparse(url_segment).query).get("after", [None])[0]
        if cursor:
            return cursor
    return None

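With the helper above in scope, a quick sketch of cursor extraction against an Okta-style Link header (the response object is a stand-in, not a real SDK response):

from types import SimpleNamespace

resp = SimpleNamespace(
    headers={
        "link": (
            '<https://example.okta.com/api/v1/policies?limit=20>; rel="self", '
            '<https://example.okta.com/api/v1/policies?after=00p9abcd&limit=20>; rel="next"'
        )
    }
)
# Only the rel="next" segment is inspected; its `after` query param comes back.
assert _next_after_cursor(resp) == "00p9abcd"
assert _next_after_cursor(None) is None
assert _next_after_cursor(SimpleNamespace(headers={})) is None
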

class Signon(OktaService):
    """Fetches OKTA_SIGN_ON policies and their rules.

    Populates `self.global_session_policies` keyed by policy id. Each
    policy carries its rules; downstream checks read directly from this
    structure.
    """

    def __init__(self, provider):
        super().__init__(__class__.__name__, provider)
        self.global_session_policies: dict[str, GlobalSessionPolicy] = (
            self._list_global_session_policies()
        )

    def _list_global_session_policies(self) -> dict:
        logger.info("Signon - Listing OKTA_SIGN_ON policies and rules...")
        try:
            return self._run(self._fetch_all())
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            return {}

    async def _fetch_all(self) -> dict:
        result: dict[str, GlobalSessionPolicy] = {}
        all_policies, err = await self._paginate(
            lambda after: self.client.list_policies(type="OKTA_SIGN_ON", after=after)
        )
        if err is not None:
            logger.error(f"Error listing OKTA_SIGN_ON policies: {err}")
            return result

        for policy in all_policies:
            rules = await self._fetch_rules(policy.id)
            result[policy.id] = GlobalSessionPolicy(
                id=policy.id,
                name=getattr(policy, "name", "") or "",
                priority=getattr(policy, "priority", None),
                status=getattr(policy, "status", "") or "",
                is_default=bool(getattr(policy, "system", False)),
                rules=rules,
            )
        return result

    async def _fetch_rules(self, policy_id: str) -> list:
        # Okta's `list_policy_rules` endpoint does not expose an `after`
        # cursor in the SDK signature, so we call once with a generous
        # `limit`. Tenants with more rules per policy than the limit would
        # be silently truncated; this is rare (most policies have <10 rules).
        rule_fetch_limit = 100
        rules_out: list[GlobalSessionPolicyRule] = []
        result = await self.client.list_policy_rules(
            policy_id, limit=str(rule_fetch_limit)
        )
        err = result[-1]
        if err is not None:
            logger.error(f"Error listing rules for policy {policy_id}: {err}")
            return rules_out
        all_rules = list(result[0] or [])
        if len(all_rules) >= rule_fetch_limit:
            logger.warning(
                f"Policy {policy_id} returned {len(all_rules)} rules — the "
                f"per-policy fetch limit ({rule_fetch_limit}) was hit; any "
                "rules beyond this limit are not evaluated by Prowler. "
                "Review the policy in the Okta Admin Console."
            )

        for rule in all_rules:
            actions = getattr(rule, "actions", None)
            signon = getattr(actions, "signon", None) if actions else None
            session = getattr(signon, "session", None) if signon else None
            conditions = getattr(rule, "conditions", None)
            network = getattr(conditions, "network", None) if conditions else None
            rules_out.append(
                GlobalSessionPolicyRule(
                    id=getattr(rule, "id", "") or "",
                    name=getattr(rule, "name", "") or "",
                    priority=getattr(rule, "priority", None),
                    status=getattr(rule, "status", "") or "",
                    is_default=bool(getattr(rule, "system", False)),
                    max_session_idle_minutes=getattr(
                        session, "max_session_idle_minutes", None
                    ),
                    max_session_lifetime_minutes=getattr(
                        session, "max_session_lifetime_minutes", None
                    ),
                    use_persistent_cookie=getattr(
                        session, "use_persistent_cookie", None
                    ),
                    network_zones_include=list(getattr(network, "include", None) or []),
                    network_zones_exclude=list(getattr(network, "exclude", None) or []),
                )
            )
        return rules_out

    @staticmethod
    async def _paginate(fetch):
        """Drain all pages of an SDK list call.

        `fetch` is a callable that takes the `after` cursor (or None for
        the first page) and returns the SDK's standard `(items, resp, err)`
        tuple. We follow `Link: rel="next"` headers until exhausted.
        """
        all_items = []
        result = await fetch(None)
        # Defensive against the SDK's 2-tuple early-error path: error is last.
        err = result[-1]
        if err is not None:
            return [], err
        items = result[0]
        resp = result[1] if len(result) >= 3 else None
        all_items.extend(items or [])
        while True:
            cursor = _next_after_cursor(resp)
            if not cursor:
                break
            result = await fetch(cursor)
            err = result[-1]
            if err is not None:
                return all_items, err
            items = result[0]
            resp = result[1] if len(result) >= 3 else None
            all_items.extend(items or [])
        return all_items, None


class GlobalSessionPolicyRule(BaseModel):
    id: str
    name: str
    priority: Optional[int] = None
    status: str = ""
    is_default: bool = False
    max_session_idle_minutes: Optional[int] = None
    max_session_lifetime_minutes: Optional[int] = None
    use_persistent_cookie: Optional[bool] = None
    network_zones_include: list[str] = []
    network_zones_exclude: list[str] = []


class GlobalSessionPolicy(BaseModel):
    id: str
    name: str
    priority: Optional[int] = None
    status: str = ""
    is_default: bool = False
    rules: list[GlobalSessionPolicyRule] = []
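
The two models above are plain pydantic containers. A minimal construction sketch showing the shape downstream checks consume (the identifiers are made up):

policy = GlobalSessionPolicy(
    id="00pabc123",
    name="Default Policy",
    priority=1,
    status="ACTIVE",
    is_default=True,
    rules=[
        GlobalSessionPolicyRule(
            id="0prdef456",
            name="STIG idle timeout",
            priority=1,
            status="ACTIVE",
            max_session_idle_minutes=15,
        )
    ],
)
assert policy.rules[0].max_session_idle_minutes == 15
assert policy.rules[0].network_zones_include == []  # list defaults apply per instance
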
@@ -0,0 +1,99 @@
# Exception codes from 14000 to 14999 are reserved for Scaleway exceptions
from prowler.exceptions.exceptions import ProwlerException


class ScalewayBaseException(ProwlerException):
    """Base exception for Scaleway provider errors."""

    SCALEWAY_ERROR_CODES = {
        (14000, "ScalewayCredentialsError"): {
            "message": "Scaleway credentials not found or invalid.",
            "remediation": (
                "Set the SCW_ACCESS_KEY and SCW_SECRET_KEY environment variables "
                "with a valid Scaleway API key. Generate one at "
                "https://console.scaleway.com/iam/api-keys."
            ),
        },
        (14001, "ScalewayAuthenticationError"): {
            "message": "Authentication to the Scaleway API failed.",
            "remediation": (
                "Verify your Scaleway API key is valid, has not expired, and that "
                "the bearer has IAM read permissions on the target organization."
            ),
        },
        (14002, "ScalewaySessionError"): {
            "message": "Failed to create a Scaleway API session.",
            "remediation": (
                "Check network connectivity and ensure the Scaleway API is "
                "reachable at https://api.scaleway.com."
            ),
        },
        (14003, "ScalewayIdentityError"): {
            "message": "Failed to retrieve Scaleway identity information.",
            "remediation": (
                "Ensure the API key has permissions to read IAM users and the "
                "owning organization metadata."
            ),
        },
        (14004, "ScalewayAPIError"): {
            "message": "An error occurred while calling the Scaleway API.",
            "remediation": (
                "Check the Scaleway API status at https://status.scaleway.com "
                "and retry. Run with --log-level DEBUG for the full traceback."
            ),
        },
    }

    def __init__(self, code, file=None, original_exception=None, message=None):
        provider = "Scaleway"
        error_info = self.SCALEWAY_ERROR_CODES.get((code, self.__class__.__name__))
        if error_info is None:
            error_info = {
                "message": message or "Unknown Scaleway error.",
                "remediation": "Check the Scaleway API documentation for more details.",
            }
        elif message:
            error_info = error_info.copy()
            error_info["message"] = message
        super().__init__(
            code=code,
            source=provider,
            file=file,
            original_exception=original_exception,
            error_info=error_info,
        )


class ScalewayCredentialsError(ScalewayBaseException):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14000, file=file, original_exception=original_exception, message=message
        )


class ScalewayAuthenticationError(ScalewayBaseException):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14001, file=file, original_exception=original_exception, message=message
        )


class ScalewaySessionError(ScalewayBaseException):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14002, file=file, original_exception=original_exception, message=message
        )


class ScalewayIdentityError(ScalewayBaseException):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14003, file=file, original_exception=original_exception, message=message
        )


class ScalewayAPIError(ScalewayBaseException):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14004, file=file, original_exception=original_exception, message=message
        )
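
Extending this family means claiming the next code in the reserved 14000-14999 block. A hypothetical addition (`ScalewayScopeError` and code 14005 are illustrative, not part of this diff):

class ScalewayScopeError(ScalewayBaseException):
    """Hypothetical example: raised when no organization is in scope."""

    def __init__(self, file=None, original_exception=None, message=None):
        # 14005 would also need an entry in SCALEWAY_ERROR_CODES keyed
        # (14005, "ScalewayScopeError") to get a canned message/remediation;
        # without one, __init__ falls back to the generic error_info branch.
        super().__init__(
            14005, file=file, original_exception=original_exception, message=message
        )
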
@@ -0,0 +1,57 @@
SENSITIVE_ARGUMENTS = frozenset({"--access-key", "--secret-key"})


def init_parser(self):
    """Init the Scaleway provider CLI parser."""
    scaleway_parser = self.subparsers.add_parser(
        "scaleway",
        parents=[self.common_providers_parser],
        help="Scaleway Provider",
    )

    # Authentication
    auth_subparser = scaleway_parser.add_argument_group("Authentication")
    auth_subparser.add_argument(
        "--access-key",
        nargs="?",
        default=None,
        metavar="SCW_ACCESS_KEY",
        help=(
            "Scaleway API access key. Prefer the SCW_ACCESS_KEY env var "
            "instead of passing it on the command line."
        ),
    )
    auth_subparser.add_argument(
        "--secret-key",
        nargs="?",
        default=None,
        metavar="SCW_SECRET_KEY",
        help=(
            "Scaleway API secret key. Prefer the SCW_SECRET_KEY env var "
            "instead of passing it on the command line."
        ),
    )

    # Scope
    scope_subparser = scaleway_parser.add_argument_group("Scope")
    scope_subparser.add_argument(
        "--organization-id",
        nargs="?",
        default=None,
        metavar="SCW_DEFAULT_ORGANIZATION_ID",
        help="Scaleway organization ID to scope the audit.",
    )
    scope_subparser.add_argument(
        "--project-id",
        nargs="?",
        default=None,
        metavar="SCW_DEFAULT_PROJECT_ID",
        help="Default Scaleway project ID for project-scoped resources.",
    )
    scope_subparser.add_argument(
        "--region",
        nargs="?",
        default=None,
        metavar="SCW_DEFAULT_REGION",
        help="Default Scaleway region (fr-par, nl-ams, pl-waw).",
    )
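
A standalone approximation of how these flags parse, using plain argparse without Prowler's common parent parsers:

import argparse

parser = argparse.ArgumentParser(prog="prowler")
subparsers = parser.add_subparsers(dest="provider")
scaleway = subparsers.add_parser("scaleway")
scaleway.add_argument("--access-key", nargs="?", default=None)
scaleway.add_argument("--secret-key", nargs="?", default=None)
scaleway.add_argument("--organization-id", nargs="?", default=None)
scaleway.add_argument("--project-id", nargs="?", default=None)
scaleway.add_argument("--region", nargs="?", default=None)

args = parser.parse_args(["scaleway", "--region", "fr-par"])
assert args.provider == "scaleway" and args.region == "fr-par"
assert args.access_key is None  # keys come from the SCW_* env vars instead
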
@@ -0,0 +1,20 @@
from prowler.lib.check.models import CheckReportScaleway
from prowler.lib.mutelist.mutelist import Mutelist
from prowler.lib.outputs.utils import unroll_dict, unroll_tags


class ScalewayMutelist(Mutelist):
    """Scaleway-specific mutelist helper."""

    def is_finding_muted(
        self,
        finding: CheckReportScaleway,
        organization_id: str,
    ) -> bool:
        return self.is_muted(
            organization_id,
            finding.check_metadata.CheckID,
            finding.region or "global",
            finding.resource_id or finding.resource_name,
            unroll_dict(unroll_tags(finding.resource_tags)),
        )
@@ -0,0 +1,44 @@
from prowler.lib.logger import logger
from prowler.providers.scaleway.exceptions.exceptions import ScalewayAPIError


class ScalewayService:
    """Base class for Scaleway services.

    Centralizes the provider context (audit/fixer configuration, the
    scoping organization, the authenticated ``scaleway.Client``) so each
    service only worries about which Scaleway API to call.
    """

    def __init__(self, service: str, provider):
        self.provider = provider
        self.audit_config = provider.audit_config
        self.fixer_config = provider.fixer_config
        self.service = service.lower()

        # Shared authenticated client and the organization in scope
        self.client = provider.session.client
        self.organization_id = provider.identity.organization_id

    def _safe_call(self, label: str, fn, *args, **kwargs):
        """Run a Scaleway SDK call and surface failures as ScalewayAPIError.

        Args:
            label: Human-readable label for the call (used in logs).
            fn: SDK function to invoke.

        Returns:
            The SDK function result.

        Raises:
            ScalewayAPIError: If the underlying SDK call raises.
        """
        try:
            return fn(*args, **kwargs)
        except Exception as error:
            logger.error(
                f"{self.service} - {label} failed: "
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            raise ScalewayAPIError(
                file=__file__,
                original_exception=error,
                message=f"Scaleway API call '{label}' failed.",
            )
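
A sketch of how a concrete service might build on `_safe_call`. The `Iam` class name, the import path, and the SDK call are assumptions for illustration, not part of this diff:

from prowler.providers.scaleway.lib.service.service import ScalewayService  # path assumed


class Iam(ScalewayService):
    def __init__(self, provider):
        super().__init__(__class__.__name__, provider)
        # Any exception raised inside the callable surfaces as a
        # ScalewayAPIError whose log line carries the "list API keys" label.
        self.api_keys = self._safe_call(
            "list API keys",
            lambda: [],  # stand-in for the real SDK list call
        )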