Compare commits
6 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 68ffb2b219 | |||
| 739be07077 | |||
| 0abbb7fc59 | |||
| 0b4393776c | |||
| 4dd5baadf6 | |||
| 934d995661 |
@@ -72,6 +72,11 @@ provider/vercel:
|
||||
- any-glob-to-any-file: "prowler/providers/vercel/**"
|
||||
- any-glob-to-any-file: "tests/providers/vercel/**"
|
||||
|
||||
provider/okta:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: "prowler/providers/okta/**"
|
||||
- any-glob-to-any-file: "tests/providers/okta/**"
|
||||
|
||||
github_actions:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: ".github/workflows/*"
|
||||
@@ -109,6 +114,8 @@ mutelist:
|
||||
- any-glob-to-any-file: "tests/providers/googleworkspace/lib/mutelist/**"
|
||||
- any-glob-to-any-file: "prowler/providers/vercel/lib/mutelist/**"
|
||||
- any-glob-to-any-file: "tests/providers/vercel/lib/mutelist/**"
|
||||
- any-glob-to-any-file: "prowler/providers/okta/lib/mutelist/**"
|
||||
- any-glob-to-any-file: "tests/providers/okta/lib/mutelist/**"
|
||||
|
||||
integration/s3:
|
||||
- changed-files:
|
||||
|
||||
@@ -36,6 +36,7 @@ Please add a detailed description of how to review this PR.
|
||||
|
||||
#### UI
|
||||
- [ ] All issue/task requirements work as expected on the UI
|
||||
- [ ] If this PR adds or updates npm dependencies, include package-health evidence (maintenance, popularity, known vulnerabilities, license, release age) and explain why existing/native alternatives are insufficient.
|
||||
- [ ] Screenshots/Video of the functionality flow (if applicable) - Mobile (X < 640px)
|
||||
- [ ] Screenshots/Video of the functionality flow (if applicable) - Table (640px > X < 1024px)
|
||||
- [ ] Screenshots/Video of the functionality flow (if applicable) - Desktop (X > 1024px)
|
||||
|
||||
@@ -324,6 +324,30 @@ jobs:
|
||||
flags: prowler-py${{ matrix.python-version }}-github
|
||||
files: ./github_coverage.xml
|
||||
|
||||
# Okta Provider
|
||||
- name: Check if Okta files changed
|
||||
if: steps.check-changes.outputs.any_changed == 'true'
|
||||
id: changed-okta
|
||||
uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
|
||||
with:
|
||||
files: |
|
||||
./prowler/**/okta/**
|
||||
./tests/**/okta/**
|
||||
./poetry.lock
|
||||
|
||||
- name: Run Okta tests
|
||||
if: steps.changed-okta.outputs.any_changed == 'true'
|
||||
run: poetry run pytest -n auto --cov=./prowler/providers/okta --cov-report=xml:okta_coverage.xml tests/providers/okta
|
||||
|
||||
- name: Upload Okta coverage to Codecov
|
||||
if: steps.changed-okta.outputs.any_changed == 'true'
|
||||
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
with:
|
||||
flags: prowler-py${{ matrix.python-version }}-okta
|
||||
files: ./okta_coverage.xml
|
||||
|
||||
# NHN Provider
|
||||
- name: Check if NHN files changed
|
||||
if: steps.check-changes.outputs.any_changed == 'true'
|
||||
|
||||
@@ -132,6 +132,10 @@ jobs:
|
||||
if: steps.check-changes.outputs.any_changed == 'true'
|
||||
run: pnpm run healthcheck
|
||||
|
||||
- name: Run pnpm audit
|
||||
if: steps.check-changes.outputs.any_changed == 'true'
|
||||
run: pnpm run audit
|
||||
|
||||
- name: Run unit tests (all - critical paths changed)
|
||||
if: steps.check-changes.outputs.any_changed == 'true' && steps.critical-changes.outputs.any_changed == 'true'
|
||||
run: |
|
||||
|
||||
@@ -117,9 +117,10 @@ Every AWS provider scan will enqueue an Attack Paths ingestion job automatically
|
||||
| MongoDB Atlas | 10 | 3 | 0 | 8 | Official | UI, API, CLI |
|
||||
| LLM | [See `promptfoo` docs.](https://www.promptfoo.dev/docs/red-team/plugins/) | N/A | N/A | N/A | Official | CLI |
|
||||
| Image | N/A | N/A | N/A | N/A | Official | CLI, API |
|
||||
| Google Workspace | 25 | 4 | 2 | 4 | Official | CLI |
|
||||
| Google Workspace | 25 | 4 | 2 | 4 | Official | UI, API, CLI |
|
||||
| OpenStack | 34 | 5 | 0 | 9 | Official | UI, API, CLI |
|
||||
| Vercel | 26 | 6 | 0 | 5 | Official | CLI |
|
||||
| Vercel | 26 | 6 | 0 | 5 | Official | UI, API, CLI |
|
||||
| Okta | 1 | 1 | 0 | 1 | Official | CLI |
|
||||
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |
|
||||
|
||||
> [!Note]
|
||||
|
||||
@@ -11,7 +11,6 @@ All notable changes to the **Prowler API** are documented in this file.
|
||||
### 🔄 Changed
|
||||
|
||||
- Remove orphaned `gin_resources_search_idx` declaration from `Resource.Meta.indexes` (DB index dropped in `0072_drop_unused_indexes`) [(#11001)](https://github.com/prowler-cloud/prowler/pull/11001)
|
||||
- PDF compliance reports cap detail tables at 100 failed findings per check (configurable via `DJANGO_PDF_MAX_FINDINGS_PER_CHECK`) to bound worker memory on large scans [(#11160)](https://github.com/prowler-cloud/prowler/pull/11160)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -20,15 +20,11 @@ from tasks.jobs.reports import (
|
||||
ThreatScoreReportGenerator,
|
||||
)
|
||||
from tasks.jobs.threatscore import compute_threatscore_metrics
|
||||
from tasks.jobs.threatscore_utils import (
|
||||
_aggregate_requirement_statistics_from_database,
|
||||
_get_compliance_check_ids,
|
||||
)
|
||||
from tasks.jobs.threatscore_utils import _aggregate_requirement_statistics_from_database
|
||||
|
||||
from api.db_router import READ_REPLICA_ALIAS, MainRouter
|
||||
from api.db_utils import rls_transaction
|
||||
from api.models import Provider, Scan, ScanSummary, StateChoices, ThreatScoreSnapshot
|
||||
from api.utils import initialize_prowler_provider
|
||||
from prowler.lib.check.compliance_models import Compliance
|
||||
from prowler.lib.outputs.finding import Finding as FindingOutput
|
||||
|
||||
@@ -431,7 +427,6 @@ def generate_threatscore_report(
|
||||
provider_obj: Provider | None = None,
|
||||
requirement_statistics: dict[str, dict[str, int]] | None = None,
|
||||
findings_cache: dict[str, list[FindingOutput]] | None = None,
|
||||
prowler_provider=None,
|
||||
) -> None:
|
||||
"""
|
||||
Generate a PDF compliance report based on Prowler ThreatScore framework.
|
||||
@@ -460,7 +455,6 @@ def generate_threatscore_report(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
only_failed=only_failed,
|
||||
)
|
||||
|
||||
@@ -475,7 +469,6 @@ def generate_ens_report(
|
||||
provider_obj: Provider | None = None,
|
||||
requirement_statistics: dict[str, dict[str, int]] | None = None,
|
||||
findings_cache: dict[str, list[FindingOutput]] | None = None,
|
||||
prowler_provider=None,
|
||||
) -> None:
|
||||
"""
|
||||
Generate a PDF compliance report for ENS RD2022 framework.
|
||||
@@ -502,7 +495,6 @@ def generate_ens_report(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
include_manual=include_manual,
|
||||
)
|
||||
|
||||
@@ -518,7 +510,6 @@ def generate_nis2_report(
|
||||
provider_obj: Provider | None = None,
|
||||
requirement_statistics: dict[str, dict[str, int]] | None = None,
|
||||
findings_cache: dict[str, list[FindingOutput]] | None = None,
|
||||
prowler_provider=None,
|
||||
) -> None:
|
||||
"""
|
||||
Generate a PDF compliance report for NIS2 Directive (EU) 2022/2555.
|
||||
@@ -546,7 +537,6 @@ def generate_nis2_report(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
only_failed=only_failed,
|
||||
include_manual=include_manual,
|
||||
)
|
||||
@@ -563,7 +553,6 @@ def generate_csa_report(
|
||||
provider_obj: Provider | None = None,
|
||||
requirement_statistics: dict[str, dict[str, int]] | None = None,
|
||||
findings_cache: dict[str, list[FindingOutput]] | None = None,
|
||||
prowler_provider=None,
|
||||
) -> None:
|
||||
"""
|
||||
Generate a PDF compliance report for CSA Cloud Controls Matrix (CCM) v4.0.
|
||||
@@ -591,7 +580,6 @@ def generate_csa_report(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
only_failed=only_failed,
|
||||
include_manual=include_manual,
|
||||
)
|
||||
@@ -608,7 +596,6 @@ def generate_cis_report(
|
||||
provider_obj: Provider | None = None,
|
||||
requirement_statistics: dict[str, dict[str, int]] | None = None,
|
||||
findings_cache: dict[str, list[FindingOutput]] | None = None,
|
||||
prowler_provider=None,
|
||||
) -> None:
|
||||
"""
|
||||
Generate a PDF compliance report for a specific CIS Benchmark variant.
|
||||
@@ -640,7 +627,6 @@ def generate_cis_report(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
only_failed=only_failed,
|
||||
include_manual=include_manual,
|
||||
)
|
||||
@@ -785,17 +771,6 @@ def generate_compliance_reports(
|
||||
results["csa"] = {"upload": False, "path": ""}
|
||||
generate_csa = False
|
||||
|
||||
# Load the framework definitions for this provider once. We use this map
|
||||
# both to pick the latest CIS variant and to precompute the set of
|
||||
# check_ids each framework consumes (for findings_cache eviction).
|
||||
frameworks_bulk: dict = {}
|
||||
try:
|
||||
frameworks_bulk = Compliance.get_bulk(provider_type)
|
||||
except Exception as e:
|
||||
logger.error("Error loading compliance frameworks for %s: %s", provider_type, e)
|
||||
# Fall through; individual frameworks will still try and fail
|
||||
# gracefully if their compliance_id is missing.
|
||||
|
||||
# For CIS we do NOT pre-check the provider against a hard-coded whitelist
|
||||
# (that list drifts the moment a new CIS JSON ships). Instead, we inspect
|
||||
# the dynamically loaded framework map and pick the latest available CIS
|
||||
@@ -803,6 +778,7 @@ def generate_compliance_reports(
|
||||
latest_cis: str | None = None
|
||||
if generate_cis:
|
||||
try:
|
||||
frameworks_bulk = Compliance.get_bulk(provider_type)
|
||||
latest_cis = _pick_latest_cis_variant(
|
||||
name for name in frameworks_bulk.keys() if name.startswith("cis_")
|
||||
)
|
||||
@@ -839,84 +815,10 @@ def generate_compliance_reports(
|
||||
tenant_id, scan_id
|
||||
)
|
||||
|
||||
# Initialize the Prowler provider once for the whole report batch. Each
|
||||
# generator used to re-init this in _load_compliance_data, paying the
|
||||
# boto3/Azure-SDK construction cost 5 times per scan. The instance is
|
||||
# only used by FindingOutput.transform_api_finding to enrich findings,
|
||||
# so a single shared instance is correct.
|
||||
logger.info("Initializing prowler_provider once for all reports (scan %s)", scan_id)
|
||||
try:
|
||||
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
|
||||
prowler_provider = initialize_prowler_provider(provider_obj)
|
||||
except Exception as init_error:
|
||||
# If init fails the generators will fall back to lazy init in
|
||||
# _load_compliance_data; we just log and continue.
|
||||
logger.warning(
|
||||
"Could not pre-initialize prowler_provider for scan %s: %s",
|
||||
scan_id,
|
||||
init_error,
|
||||
)
|
||||
prowler_provider = None
|
||||
|
||||
# Create shared findings cache up front so the eviction closure below
|
||||
# can reference it. Defined BEFORE the closure to avoid the UnboundLocalError
|
||||
# trap if an early-return is later inserted between the closure and its
|
||||
# first use.
|
||||
findings_cache: dict[str, list[FindingOutput]] = {}
|
||||
# Create shared findings cache
|
||||
findings_cache = {}
|
||||
logger.info("Created shared findings cache for all reports")
|
||||
|
||||
# Precompute the set of check_ids each framework consumes. After a
|
||||
# framework finishes, every check_id that no remaining framework still
|
||||
# needs is evicted from findings_cache so the dict does not keep
|
||||
# growing through the batch (PROWLER-1733).
|
||||
pending_checks_by_framework: dict[str, set[str]] = {}
|
||||
if generate_threatscore:
|
||||
pending_checks_by_framework["threatscore"] = _get_compliance_check_ids(
|
||||
frameworks_bulk.get(f"prowler_threatscore_{provider_type}")
|
||||
)
|
||||
if generate_ens:
|
||||
pending_checks_by_framework["ens"] = _get_compliance_check_ids(
|
||||
frameworks_bulk.get(f"ens_rd2022_{provider_type}")
|
||||
)
|
||||
if generate_nis2:
|
||||
pending_checks_by_framework["nis2"] = _get_compliance_check_ids(
|
||||
frameworks_bulk.get(f"nis2_{provider_type}")
|
||||
)
|
||||
if generate_csa:
|
||||
pending_checks_by_framework["csa"] = _get_compliance_check_ids(
|
||||
frameworks_bulk.get(f"csa_ccm_4.0_{provider_type}")
|
||||
)
|
||||
if generate_cis and latest_cis:
|
||||
pending_checks_by_framework["cis"] = _get_compliance_check_ids(
|
||||
frameworks_bulk.get(latest_cis)
|
||||
)
|
||||
|
||||
def _evict_after_framework(done_key: str) -> int:
|
||||
"""Drop from findings_cache every check_id no pending framework still needs."""
|
||||
done = pending_checks_by_framework.pop(done_key, set())
|
||||
still_needed: set[str] = (
|
||||
set().union(*pending_checks_by_framework.values())
|
||||
if pending_checks_by_framework
|
||||
else set()
|
||||
)
|
||||
exclusive = done - still_needed
|
||||
evicted = 0
|
||||
for cid in exclusive:
|
||||
if findings_cache.pop(cid, None) is not None:
|
||||
evicted += 1
|
||||
if evicted:
|
||||
logger.info(
|
||||
"Evicted %d exclusive check entries from findings_cache after %s "
|
||||
"(remaining cache size: %d)",
|
||||
evicted,
|
||||
done_key,
|
||||
len(findings_cache),
|
||||
)
|
||||
# Release the lists' memory now instead of waiting for the next
|
||||
# gc cycle; FindingOutput instances retain quite a bit of state.
|
||||
gc.collect()
|
||||
return evicted
|
||||
|
||||
generated_report_keys: list[str] = []
|
||||
output_paths: dict[str, str] = {}
|
||||
out_dir: str | None = None
|
||||
@@ -1005,7 +907,6 @@ def generate_compliance_reports(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
)
|
||||
|
||||
# Compute and store ThreatScore metrics snapshot
|
||||
@@ -1083,15 +984,9 @@ def generate_compliance_reports(
|
||||
logger.warning("ThreatScore report saved locally at %s", out_dir)
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"compliance_report_failed framework=threatscore scan_id=%s tenant_id=%s",
|
||||
scan_id,
|
||||
tenant_id,
|
||||
)
|
||||
logger.error("Error generating ThreatScore report: %s", e)
|
||||
results["threatscore"] = {"upload": False, "path": "", "error": str(e)}
|
||||
|
||||
_evict_after_framework("threatscore")
|
||||
|
||||
# Generate ENS report
|
||||
if generate_ens:
|
||||
generated_report_keys.append("ens")
|
||||
@@ -1111,7 +1006,6 @@ def generate_compliance_reports(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
)
|
||||
|
||||
upload_uri_ens = _upload_to_s3(
|
||||
@@ -1126,15 +1020,9 @@ def generate_compliance_reports(
|
||||
logger.warning("ENS report saved locally at %s", out_dir)
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"compliance_report_failed framework=ens scan_id=%s tenant_id=%s",
|
||||
scan_id,
|
||||
tenant_id,
|
||||
)
|
||||
logger.error("Error generating ENS report: %s", e)
|
||||
results["ens"] = {"upload": False, "path": "", "error": str(e)}
|
||||
|
||||
_evict_after_framework("ens")
|
||||
|
||||
# Generate NIS2 report
|
||||
if generate_nis2:
|
||||
generated_report_keys.append("nis2")
|
||||
@@ -1155,7 +1043,6 @@ def generate_compliance_reports(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
)
|
||||
|
||||
upload_uri_nis2 = _upload_to_s3(
|
||||
@@ -1170,15 +1057,9 @@ def generate_compliance_reports(
|
||||
logger.warning("NIS2 report saved locally at %s", out_dir)
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"compliance_report_failed framework=nis2 scan_id=%s tenant_id=%s",
|
||||
scan_id,
|
||||
tenant_id,
|
||||
)
|
||||
logger.error("Error generating NIS2 report: %s", e)
|
||||
results["nis2"] = {"upload": False, "path": "", "error": str(e)}
|
||||
|
||||
_evict_after_framework("nis2")
|
||||
|
||||
# Generate CSA CCM report
|
||||
if generate_csa:
|
||||
generated_report_keys.append("csa")
|
||||
@@ -1199,7 +1080,6 @@ def generate_compliance_reports(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
)
|
||||
|
||||
upload_uri_csa = _upload_to_s3(
|
||||
@@ -1214,15 +1094,9 @@ def generate_compliance_reports(
|
||||
logger.warning("CSA CCM report saved locally at %s", out_dir)
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"compliance_report_failed framework=csa scan_id=%s tenant_id=%s",
|
||||
scan_id,
|
||||
tenant_id,
|
||||
)
|
||||
logger.error("Error generating CSA CCM report: %s", e)
|
||||
results["csa"] = {"upload": False, "path": "", "error": str(e)}
|
||||
|
||||
_evict_after_framework("csa")
|
||||
|
||||
# Generate CIS Benchmark report for the latest available version only.
|
||||
# CIS ships multiple versions per provider (e.g. cis_1.4_aws, cis_5.0_aws,
|
||||
# cis_6.0_aws); we dynamically pick the highest semantic version at run
|
||||
@@ -1245,7 +1119,6 @@ def generate_compliance_reports(
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
)
|
||||
|
||||
upload_uri_cis = _upload_to_s3(
|
||||
@@ -1274,22 +1147,14 @@ def generate_compliance_reports(
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"compliance_report_failed framework=cis variant=%s scan_id=%s tenant_id=%s",
|
||||
latest_cis,
|
||||
scan_id,
|
||||
tenant_id,
|
||||
)
|
||||
logger.error("Error generating CIS report %s: %s", latest_cis, e)
|
||||
results["cis"] = {
|
||||
"upload": False,
|
||||
"path": "",
|
||||
"error": str(e),
|
||||
}
|
||||
finally:
|
||||
# Free ReportLab/matplotlib memory before moving on. CIS is
|
||||
# always the last framework, so evicting its entries clears the
|
||||
# cache entirely (subject to its check_ids set).
|
||||
_evict_after_framework("cis")
|
||||
# Free ReportLab/matplotlib memory before moving on.
|
||||
gc.collect()
|
||||
|
||||
# Clean up temporary files only if all generated reports were
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
import gc
|
||||
import os
|
||||
import resource as _resource_module
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
@@ -44,7 +41,6 @@ from .config import (
|
||||
COLOR_LIGHT_BLUE,
|
||||
COLOR_LIGHTER_BLUE,
|
||||
COLOR_PROWLER_DARK_GREEN,
|
||||
FINDINGS_TABLE_CHUNK_SIZE,
|
||||
PADDING_LARGE,
|
||||
PADDING_SMALL,
|
||||
FrameworkConfig,
|
||||
@@ -52,53 +48,6 @@ from .config import (
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _log_phase(phase: str, *, scan_id: str, framework: str):
|
||||
"""Log start/end timing and RSS deltas around a report-building section.
|
||||
|
||||
Emits structured key=value logs so Grafana/Datadog/CloudWatch queries
|
||||
can pivot by ``phase``, ``framework`` and ``scan_id`` to find the
|
||||
slow/heavy section on any given scan. ``getrusage`` returns KB on
|
||||
Linux and bytes on macOS; the values are still useful in relative
|
||||
terms even though units differ across platforms.
|
||||
"""
|
||||
start = time.perf_counter()
|
||||
rss_before = _resource_module.getrusage(_resource_module.RUSAGE_SELF).ru_maxrss
|
||||
logger.info(
|
||||
"phase_start phase=%s scan_id=%s framework=%s rss_kb=%d",
|
||||
phase,
|
||||
scan_id,
|
||||
framework,
|
||||
rss_before,
|
||||
)
|
||||
try:
|
||||
yield
|
||||
except Exception:
|
||||
elapsed = time.perf_counter() - start
|
||||
logger.exception(
|
||||
"phase_failed phase=%s scan_id=%s framework=%s elapsed_s=%.2f",
|
||||
phase,
|
||||
scan_id,
|
||||
framework,
|
||||
elapsed,
|
||||
)
|
||||
raise
|
||||
else:
|
||||
elapsed = time.perf_counter() - start
|
||||
rss_after = _resource_module.getrusage(_resource_module.RUSAGE_SELF).ru_maxrss
|
||||
logger.info(
|
||||
"phase_end phase=%s scan_id=%s framework=%s elapsed_s=%.2f "
|
||||
"rss_kb=%d delta_rss_kb=%d",
|
||||
phase,
|
||||
scan_id,
|
||||
framework,
|
||||
elapsed,
|
||||
rss_after,
|
||||
rss_after - rss_before,
|
||||
)
|
||||
|
||||
|
||||
# Register fonts (done once at module load)
|
||||
_fonts_registered: bool = False
|
||||
|
||||
@@ -386,7 +335,6 @@ class BaseComplianceReportGenerator(ABC):
|
||||
provider_obj: Provider | None = None,
|
||||
requirement_statistics: dict[str, dict[str, int]] | None = None,
|
||||
findings_cache: dict[str, list[FindingOutput]] | None = None,
|
||||
prowler_provider: Any | None = None,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""Generate the PDF compliance report.
|
||||
@@ -403,35 +351,23 @@ class BaseComplianceReportGenerator(ABC):
|
||||
provider_obj: Optional pre-fetched Provider object
|
||||
requirement_statistics: Optional pre-aggregated statistics
|
||||
findings_cache: Optional pre-loaded findings cache
|
||||
prowler_provider: Optional pre-initialized Prowler provider. When
|
||||
generating multiple reports for the same scan the master
|
||||
function initializes this once and passes it in to avoid
|
||||
re-running boto3/Azure-SDK setup per framework.
|
||||
**kwargs: Additional framework-specific arguments
|
||||
"""
|
||||
framework = self.config.display_name
|
||||
logger.info(
|
||||
"report_generation_start framework=%s scan_id=%s compliance_id=%s",
|
||||
framework,
|
||||
scan_id,
|
||||
compliance_id,
|
||||
"Generating %s report for scan %s", self.config.display_name, scan_id
|
||||
)
|
||||
|
||||
try:
|
||||
# 1. Load compliance data
|
||||
with _log_phase(
|
||||
"load_compliance_data", scan_id=scan_id, framework=framework
|
||||
):
|
||||
data = self._load_compliance_data(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=scan_id,
|
||||
compliance_id=compliance_id,
|
||||
provider_id=provider_id,
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
prowler_provider=prowler_provider,
|
||||
)
|
||||
data = self._load_compliance_data(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=scan_id,
|
||||
compliance_id=compliance_id,
|
||||
provider_id=provider_id,
|
||||
provider_obj=provider_obj,
|
||||
requirement_statistics=requirement_statistics,
|
||||
findings_cache=findings_cache,
|
||||
)
|
||||
|
||||
# 2. Create PDF document
|
||||
doc = self._create_document(output_path, data)
|
||||
@@ -441,54 +377,37 @@ class BaseComplianceReportGenerator(ABC):
|
||||
elements = []
|
||||
|
||||
# Cover page (lightweight)
|
||||
with _log_phase("cover_page", scan_id=scan_id, framework=framework):
|
||||
elements.extend(self.create_cover_page(data))
|
||||
elements.append(PageBreak())
|
||||
elements.extend(self.create_cover_page(data))
|
||||
elements.append(PageBreak())
|
||||
|
||||
# Executive summary (framework-specific)
|
||||
with _log_phase("executive_summary", scan_id=scan_id, framework=framework):
|
||||
elements.extend(self.create_executive_summary(data))
|
||||
elements.extend(self.create_executive_summary(data))
|
||||
|
||||
# Body sections (charts + requirements index)
|
||||
# Override _build_body_sections() in subclasses to change section order
|
||||
with _log_phase("body_sections", scan_id=scan_id, framework=framework):
|
||||
elements.extend(self._build_body_sections(data))
|
||||
elements.extend(self._build_body_sections(data))
|
||||
|
||||
# Detailed findings - heaviest section, loads findings on-demand
|
||||
with _log_phase("detailed_findings", scan_id=scan_id, framework=framework):
|
||||
elements.extend(self.create_detailed_findings(data, **kwargs))
|
||||
gc.collect() # Free findings data after processing
|
||||
logger.info("Building detailed findings section...")
|
||||
elements.extend(self.create_detailed_findings(data, **kwargs))
|
||||
gc.collect() # Free findings data after processing
|
||||
|
||||
# 4. Build the PDF
|
||||
logger.info(
|
||||
"doc_build_about_to_run framework=%s scan_id=%s elements=%d",
|
||||
framework,
|
||||
scan_id,
|
||||
len(elements),
|
||||
)
|
||||
with _log_phase("doc_build", scan_id=scan_id, framework=framework):
|
||||
self._build_pdf(doc, elements, data)
|
||||
logger.info("Building PDF document with %d elements...", len(elements))
|
||||
self._build_pdf(doc, elements, data)
|
||||
|
||||
# Final cleanup
|
||||
del elements
|
||||
gc.collect()
|
||||
|
||||
logger.info(
|
||||
"report_generation_end framework=%s scan_id=%s output_path=%s",
|
||||
framework,
|
||||
scan_id,
|
||||
output_path,
|
||||
)
|
||||
logger.info("Successfully generated report at %s", output_path)
|
||||
|
||||
except Exception:
|
||||
# logger.exception captures the full traceback; the contextual
|
||||
# keys keep production search-by-scan-id viable.
|
||||
logger.exception(
|
||||
"report_generation_failed framework=%s scan_id=%s compliance_id=%s",
|
||||
framework,
|
||||
scan_id,
|
||||
compliance_id,
|
||||
)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
tb_lineno = e.__traceback__.tb_lineno if e.__traceback__ else "unknown"
|
||||
logger.error("Error generating report, line %s -- %s", tb_lineno, e)
|
||||
logger.error("Full traceback:\n%s", traceback.format_exc())
|
||||
raise
|
||||
|
||||
def _build_body_sections(self, data: ComplianceData) -> list:
|
||||
@@ -719,25 +638,15 @@ class BaseComplianceReportGenerator(ABC):
|
||||
for req in requirements:
|
||||
check_ids_to_load.extend(req.checks)
|
||||
|
||||
# Load findings on-demand only for the checks that will be displayed.
|
||||
# When ``only_failed`` is active at requirement level, also push the
|
||||
# FAIL filter down to the finding level: a requirement marked FAIL
|
||||
# because 1/1000 findings failed must not render a table dominated by
|
||||
# 999 PASS rows. That hides the actual failure under noise and
|
||||
# makes the per-check cap truncate the wrong rows.
|
||||
# ``total_counts`` is populated with the pre-cap total per check_id
|
||||
# (FAIL-only when only_failed is active) so the "Showing first N of
|
||||
# M" banner uses the same denominator the reader cares about.
|
||||
# Load findings on-demand only for the checks that will be displayed
|
||||
# Uses the shared findings cache to avoid duplicate queries across reports
|
||||
logger.info("Loading findings on-demand for %d requirements", len(requirements))
|
||||
total_counts: dict[str, int] = {}
|
||||
findings_by_check_id = _load_findings_for_requirement_checks(
|
||||
data.tenant_id,
|
||||
data.scan_id,
|
||||
check_ids_to_load,
|
||||
data.prowler_provider,
|
||||
data.findings_by_check_id, # Pass the cache to update it
|
||||
total_counts_out=total_counts,
|
||||
only_failed_findings=only_failed,
|
||||
)
|
||||
|
||||
for req in requirements:
|
||||
@@ -769,31 +678,9 @@ class BaseComplianceReportGenerator(ABC):
|
||||
)
|
||||
)
|
||||
else:
|
||||
# Surface truncation BEFORE the tables so readers see it
|
||||
# at the same scroll position as the data itself, not
|
||||
# after thousands of rendered rows.
|
||||
loaded = len(findings)
|
||||
total = total_counts.get(check_id, loaded)
|
||||
if total > loaded:
|
||||
kind = "failed findings" if only_failed else "findings"
|
||||
elements.append(
|
||||
Paragraph(
|
||||
f"<b>⚠ Showing first {loaded:,} of "
|
||||
f"{total:,} {kind} for this check.</b> "
|
||||
f"Use the CSV or JSON export for the full "
|
||||
f"list. The PDF caps detail rows to keep "
|
||||
f"the report readable and bounded in size.",
|
||||
self.styles["normal"],
|
||||
)
|
||||
)
|
||||
elements.append(Spacer(1, 0.05 * inch))
|
||||
|
||||
# Create chunked findings tables to prevent OOM when a
|
||||
# single check has thousands of findings (ReportLab
|
||||
# resolves layout per Flowable, so many small tables
|
||||
# render contiguously with a bounded memory peak).
|
||||
findings_tables = self._create_findings_tables(findings)
|
||||
elements.extend(findings_tables)
|
||||
# Create findings table
|
||||
findings_table = self._create_findings_table(findings)
|
||||
elements.append(findings_table)
|
||||
|
||||
elements.append(Spacer(1, 0.1 * inch))
|
||||
|
||||
@@ -848,7 +735,6 @@ class BaseComplianceReportGenerator(ABC):
|
||||
provider_obj: Provider | None,
|
||||
requirement_statistics: dict | None,
|
||||
findings_cache: dict | None,
|
||||
prowler_provider: Any | None = None,
|
||||
) -> ComplianceData:
|
||||
"""Load and aggregate compliance data from the database.
|
||||
|
||||
@@ -860,9 +746,6 @@ class BaseComplianceReportGenerator(ABC):
|
||||
provider_obj: Optional pre-fetched Provider
|
||||
requirement_statistics: Optional pre-aggregated statistics
|
||||
findings_cache: Optional pre-loaded findings
|
||||
prowler_provider: Optional pre-initialized Prowler provider. When
|
||||
the master function initializes it once and passes it in,
|
||||
we skip the per-report ``initialize_prowler_provider`` call.
|
||||
|
||||
Returns:
|
||||
Aggregated ComplianceData object
|
||||
@@ -872,8 +755,7 @@ class BaseComplianceReportGenerator(ABC):
|
||||
if provider_obj is None:
|
||||
provider_obj = Provider.objects.get(id=provider_id)
|
||||
|
||||
if prowler_provider is None:
|
||||
prowler_provider = initialize_prowler_provider(provider_obj)
|
||||
prowler_provider = initialize_prowler_provider(provider_obj)
|
||||
provider_type = provider_obj.provider
|
||||
|
||||
# Load compliance framework
|
||||
@@ -941,32 +823,13 @@ class BaseComplianceReportGenerator(ABC):
|
||||
) -> SimpleDocTemplate:
|
||||
"""Create the PDF document template.
|
||||
|
||||
Validates that ``output_path`` is a filesystem path string with an
|
||||
existing parent directory. SimpleDocTemplate technically accepts a
|
||||
BytesIO too, but we want every report to land on disk so the
|
||||
Celery worker doesn't hold the full PDF in memory while uploading
|
||||
to S3.
|
||||
|
||||
Args:
|
||||
output_path: Path for the output PDF
|
||||
data: Compliance data for metadata
|
||||
|
||||
Returns:
|
||||
Configured SimpleDocTemplate
|
||||
|
||||
Raises:
|
||||
TypeError: ``output_path`` is not a string.
|
||||
FileNotFoundError: The parent directory does not exist.
|
||||
"""
|
||||
if not isinstance(output_path, str):
|
||||
raise TypeError(
|
||||
"output_path must be a filesystem path string; "
|
||||
f"got {type(output_path).__name__}"
|
||||
)
|
||||
parent_dir = os.path.dirname(output_path)
|
||||
if parent_dir and not os.path.isdir(parent_dir):
|
||||
raise FileNotFoundError(f"Output directory does not exist: {parent_dir}")
|
||||
|
||||
return SimpleDocTemplate(
|
||||
output_path,
|
||||
pagesize=letter,
|
||||
@@ -1013,10 +876,47 @@ class BaseComplianceReportGenerator(ABC):
|
||||
onLaterPages=add_footer,
|
||||
)
|
||||
|
||||
# Column layout shared by all findings sub-tables. Defined as a method so
|
||||
# subclasses can override it without re-implementing the chunking logic.
|
||||
def _findings_table_columns(self) -> list[ColumnConfig]:
|
||||
return [
|
||||
def _create_findings_table(self, findings: list[FindingOutput]) -> Any:
|
||||
"""Create a findings table.
|
||||
|
||||
Args:
|
||||
findings: List of finding objects
|
||||
|
||||
Returns:
|
||||
ReportLab Table element
|
||||
"""
|
||||
|
||||
def get_finding_title(f):
|
||||
metadata = getattr(f, "metadata", None)
|
||||
if metadata:
|
||||
return getattr(metadata, "CheckTitle", getattr(f, "check_id", ""))
|
||||
return getattr(f, "check_id", "")
|
||||
|
||||
def get_resource_name(f):
|
||||
name = getattr(f, "resource_name", "")
|
||||
if not name:
|
||||
name = getattr(f, "resource_uid", "")
|
||||
return name
|
||||
|
||||
def get_severity(f):
|
||||
metadata = getattr(f, "metadata", None)
|
||||
if metadata:
|
||||
return getattr(metadata, "Severity", "").capitalize()
|
||||
return ""
|
||||
|
||||
# Convert findings to dicts for the table
|
||||
data = []
|
||||
for f in findings:
|
||||
item = {
|
||||
"title": get_finding_title(f),
|
||||
"resource_name": get_resource_name(f),
|
||||
"severity": get_severity(f),
|
||||
"status": getattr(f, "status", "").upper(),
|
||||
"region": getattr(f, "region", "global"),
|
||||
}
|
||||
data.append(item)
|
||||
|
||||
columns = [
|
||||
ColumnConfig("Finding", 2.5 * inch, "title"),
|
||||
ColumnConfig("Resource", 3 * inch, "resource_name"),
|
||||
ColumnConfig("Severity", 0.9 * inch, "severity"),
|
||||
@@ -1024,122 +924,9 @@ class BaseComplianceReportGenerator(ABC):
|
||||
ColumnConfig("Region", 0.9 * inch, "region"),
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def _finding_to_row(f: FindingOutput) -> dict[str, str]:
|
||||
"""Project a FindingOutput onto the row dict the table expects.
|
||||
|
||||
Kept defensive: missing metadata or attributes return empty strings
|
||||
rather than raising, so a single malformed finding never breaks the
|
||||
whole report.
|
||||
"""
|
||||
metadata = getattr(f, "metadata", None)
|
||||
title = (
|
||||
getattr(metadata, "CheckTitle", getattr(f, "check_id", ""))
|
||||
if metadata
|
||||
else getattr(f, "check_id", "")
|
||||
)
|
||||
resource_name = getattr(f, "resource_name", "") or getattr(
|
||||
f, "resource_uid", ""
|
||||
)
|
||||
severity = getattr(metadata, "Severity", "").capitalize() if metadata else ""
|
||||
return {
|
||||
"title": title,
|
||||
"resource_name": resource_name,
|
||||
"severity": severity,
|
||||
"status": getattr(f, "status", "").upper(),
|
||||
"region": getattr(f, "region", "global"),
|
||||
}
|
||||
|
||||
def _create_findings_tables(
|
||||
self,
|
||||
findings: list[FindingOutput],
|
||||
chunk_size: int | None = None,
|
||||
) -> list[Any]:
|
||||
"""Build a list of small findings tables to keep ``doc.build()`` memory bounded.
|
||||
|
||||
ReportLab resolves layout (column widths, row heights, page-breaks)
|
||||
per Flowable. A single ``LongTable`` of 15k rows forces all of that
|
||||
to be computed at once and reliably OOMs the worker on large scans.
|
||||
Splitting into chunks of ``chunk_size`` rows produces an equivalent-
|
||||
looking PDF (LongTable repeats headers; chunks render contiguously)
|
||||
with a bounded memory peak per chunk.
|
||||
|
||||
Args:
|
||||
findings: List of finding objects for a single check.
|
||||
chunk_size: Rows per sub-table. ``None`` uses
|
||||
``FINDINGS_TABLE_CHUNK_SIZE`` from config.
|
||||
|
||||
Returns:
|
||||
List of ReportLab flowables (interleaved ``Table``/``LongTable``
|
||||
and small ``Spacer`` between chunks). Empty list when there are
|
||||
no findings.
|
||||
"""
|
||||
if not findings:
|
||||
return []
|
||||
|
||||
chunk_size = chunk_size or FINDINGS_TABLE_CHUNK_SIZE
|
||||
|
||||
# Build all rows first so we can chunk without re-walking the
|
||||
# FindingOutput list. Malformed findings are skipped with a logged
|
||||
# exception, never enough to abort the entire report.
|
||||
rows: list[dict[str, str]] = []
|
||||
for f in findings:
|
||||
try:
|
||||
rows.append(self._finding_to_row(f))
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Skipping malformed finding while building table for check %s",
|
||||
getattr(f, "check_id", "unknown"),
|
||||
)
|
||||
|
||||
if not rows:
|
||||
return []
|
||||
|
||||
columns = self._findings_table_columns()
|
||||
|
||||
flowables: list = []
|
||||
total = len(rows)
|
||||
for start in range(0, total, chunk_size):
|
||||
chunk = rows[start : start + chunk_size]
|
||||
flowables.append(
|
||||
create_data_table(
|
||||
data=chunk,
|
||||
columns=columns,
|
||||
header_color=self.config.primary_color,
|
||||
normal_style=self.styles["normal_center"],
|
||||
)
|
||||
)
|
||||
# A tiny spacer between chunks keeps them visually contiguous
|
||||
# without forcing a page-break (KeepTogether would negate the
|
||||
# memory benefit of chunking).
|
||||
if start + chunk_size < total:
|
||||
flowables.append(Spacer(1, 0.05 * inch))
|
||||
|
||||
if total > chunk_size:
|
||||
logger.debug(
|
||||
"Built %d findings sub-tables (chunk_size=%d, total_findings=%d)",
|
||||
(total + chunk_size - 1) // chunk_size,
|
||||
chunk_size,
|
||||
total,
|
||||
)
|
||||
|
||||
return flowables
|
||||
|
||||
def _create_findings_table(self, findings: list[FindingOutput]) -> Any:
|
||||
"""Deprecated alias kept for backwards compatibility.
|
||||
|
||||
Returns the first chunk produced by ``_create_findings_tables``.
|
||||
New callers MUST use ``_create_findings_tables``, which returns a
|
||||
list of flowables and is what ``create_detailed_findings`` invokes.
|
||||
"""
|
||||
flowables = self._create_findings_tables(findings)
|
||||
if flowables:
|
||||
return flowables[0]
|
||||
# Empty input → return an empty (header-only) table so callers that
|
||||
# used to receive a Table never get None.
|
||||
return create_data_table(
|
||||
data=[],
|
||||
columns=self._findings_table_columns(),
|
||||
data=data,
|
||||
columns=columns,
|
||||
header_color=self.config.primary_color,
|
||||
normal_style=self.styles["normal_center"],
|
||||
)
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
import gc
|
||||
import io
|
||||
import math
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import matplotlib
|
||||
from celery.utils.log import get_task_logger
|
||||
|
||||
# Use non-interactive Agg backend for memory efficiency in server environments
|
||||
# This MUST be set before importing pyplot
|
||||
@@ -22,26 +20,6 @@ from .config import ( # noqa: E402
|
||||
CHART_DPI_DEFAULT,
|
||||
)
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
|
||||
def _log_chart_built(name: str, dpi: int, buffer: io.BytesIO, started: float) -> None:
|
||||
"""Emit a structured DEBUG line summarising a chart render.
|
||||
|
||||
Centralised so the formatting stays consistent across all chart helpers
|
||||
and so we never accidentally pay for buffer.getbuffer().nbytes when
|
||||
debug logging is disabled.
|
||||
"""
|
||||
if logger.isEnabledFor(10): # logging.DEBUG
|
||||
logger.debug(
|
||||
"chart_built name=%s dpi=%d bytes=%d elapsed_s=%.2f",
|
||||
name,
|
||||
dpi,
|
||||
buffer.getbuffer().nbytes,
|
||||
time.perf_counter() - started,
|
||||
)
|
||||
|
||||
|
||||
# Use centralized DPI setting from config
|
||||
DEFAULT_CHART_DPI = CHART_DPI_DEFAULT
|
||||
|
||||
@@ -99,7 +77,6 @@ def create_vertical_bar_chart(
|
||||
Returns:
|
||||
BytesIO buffer containing the PNG image
|
||||
"""
|
||||
_started = time.perf_counter()
|
||||
if color_func is None:
|
||||
color_func = get_chart_color_for_percentage
|
||||
|
||||
@@ -145,7 +122,6 @@ def create_vertical_bar_chart(
|
||||
plt.close(fig)
|
||||
gc.collect() # Force garbage collection after heavy matplotlib operation
|
||||
|
||||
_log_chart_built("vertical_bar", dpi, buffer, _started)
|
||||
return buffer
|
||||
|
||||
|
||||
@@ -180,7 +156,6 @@ def create_horizontal_bar_chart(
|
||||
Returns:
|
||||
BytesIO buffer containing the PNG image
|
||||
"""
|
||||
_started = time.perf_counter()
|
||||
if color_func is None:
|
||||
color_func = get_chart_color_for_percentage
|
||||
|
||||
@@ -232,7 +207,6 @@ def create_horizontal_bar_chart(
|
||||
plt.close(fig)
|
||||
gc.collect() # Force garbage collection after heavy matplotlib operation
|
||||
|
||||
_log_chart_built("horizontal_bar", dpi, buffer, _started)
|
||||
return buffer
|
||||
|
||||
|
||||
@@ -265,7 +239,6 @@ def create_radar_chart(
|
||||
Returns:
|
||||
BytesIO buffer containing the PNG image
|
||||
"""
|
||||
_started = time.perf_counter()
|
||||
num_vars = len(labels)
|
||||
angles = [n / float(num_vars) * 2 * math.pi for n in range(num_vars)]
|
||||
|
||||
@@ -302,7 +275,6 @@ def create_radar_chart(
|
||||
plt.close(fig)
|
||||
gc.collect() # Force garbage collection after heavy matplotlib operation
|
||||
|
||||
_log_chart_built("radar", dpi, buffer, _started)
|
||||
return buffer
|
||||
|
||||
|
||||
@@ -331,7 +303,6 @@ def create_pie_chart(
|
||||
Returns:
|
||||
BytesIO buffer containing the PNG image
|
||||
"""
|
||||
_started = time.perf_counter()
|
||||
fig, ax = plt.subplots(figsize=figsize)
|
||||
|
||||
_, _, autotexts = ax.pie(
|
||||
@@ -359,7 +330,6 @@ def create_pie_chart(
|
||||
plt.close(fig)
|
||||
gc.collect() # Force garbage collection after heavy matplotlib operation
|
||||
|
||||
_log_chart_built("pie", dpi, buffer, _started)
|
||||
return buffer
|
||||
|
||||
|
||||
@@ -392,7 +362,6 @@ def create_stacked_bar_chart(
|
||||
Returns:
|
||||
BytesIO buffer containing the PNG image
|
||||
"""
|
||||
_started = time.perf_counter()
|
||||
fig, ax = plt.subplots(figsize=figsize)
|
||||
|
||||
# Default colors if not provided
|
||||
@@ -432,5 +401,4 @@ def create_stacked_bar_chart(
|
||||
plt.close(fig)
|
||||
gc.collect() # Force garbage collection after heavy matplotlib operation
|
||||
|
||||
_log_chart_built("stacked_bar", dpi, buffer, _started)
|
||||
return buffer
|
||||
|
||||
@@ -475,15 +475,8 @@ def create_data_table(
|
||||
else:
|
||||
value = item.get(col.field, "")
|
||||
|
||||
# Wrap every string cell in Paragraph so the data rows keep the
|
||||
# caller-supplied font/colour/alignment. Skipping Paragraph for
|
||||
# short cells (a tempting micro-optimisation) breaks visual
|
||||
# consistency: ReportLab Table falls back to Helvetica/black for
|
||||
# raw strings, mixing fonts within the same table.
|
||||
# ``escape_html`` keeps ``<``/``>``/``&`` in resource names from
|
||||
# breaking Paragraph's mini-HTML parser.
|
||||
if normal_style and isinstance(value, str):
|
||||
value = Paragraph(escape_html(value), normal_style)
|
||||
value = Paragraph(value, normal_style)
|
||||
row.append(value)
|
||||
table_data.append(row)
|
||||
|
||||
@@ -515,26 +508,17 @@ def create_data_table(
|
||||
for idx, col in enumerate(columns):
|
||||
styles.append(("ALIGN", (idx, 0), (idx, -1), col.align))
|
||||
|
||||
# Alternate row backgrounds: single O(1) ROWBACKGROUNDS style entry.
|
||||
# The previous implementation appended N per-row BACKGROUND commands,
|
||||
# which scaled the TableStyle list linearly with row count. ReportLab
|
||||
# cycles through the colour list row-by-row so the visual is identical.
|
||||
# The ALTERNATE_ROWS_MAX_SIZE cap is preserved to mirror legacy
|
||||
# behaviour (very large tables stay plain), but the memory cost of the
|
||||
# styles list is now constant regardless of row count.
|
||||
# Alternate row backgrounds - skip for very large tables as it adds memory overhead
|
||||
if (
|
||||
alternate_rows
|
||||
and len(table_data) > 1
|
||||
and len(table_data) <= ALTERNATE_ROWS_MAX_SIZE
|
||||
):
|
||||
styles.append(
|
||||
(
|
||||
"ROWBACKGROUNDS",
|
||||
(0, 1),
|
||||
(-1, -1),
|
||||
[colors.white, colors.Color(0.98, 0.98, 0.98)],
|
||||
)
|
||||
)
|
||||
for i in range(1, len(table_data)):
|
||||
if i % 2 == 0:
|
||||
styles.append(
|
||||
("BACKGROUND", (0, i), (-1, i), colors.Color(0.98, 0.98, 0.98))
|
||||
)
|
||||
|
||||
table.setStyle(TableStyle(styles))
|
||||
return table
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
from reportlab.lib import colors
|
||||
@@ -24,47 +23,6 @@ ALTERNATE_ROWS_MAX_SIZE = 200
|
||||
# Larger = fewer queries but more memory per batch
|
||||
FINDINGS_BATCH_SIZE = 2000
|
||||
|
||||
# Maximum rows per findings sub-table. ReportLab resolves layout per Flowable;
|
||||
# splitting a huge findings list into multiple smaller tables keeps the peak
|
||||
# memory of doc.build() bounded. A single 15k-row LongTable would force
|
||||
# ReportLab to compute all column widths/row heights/page-breaks at once and
|
||||
# OOM the worker; 300-row chunks are rendered contiguously with negligible
|
||||
# visual impact.
|
||||
FINDINGS_TABLE_CHUNK_SIZE = 300
|
||||
|
||||
# Maximum findings rendered per check in the detailed-findings section.
|
||||
#
|
||||
# Product behaviour: compliance PDFs render at most ``MAX_FINDINGS_PER_CHECK``
|
||||
# **failed** findings per check (PASS rows are excluded at SQL level by the
|
||||
# ``only_failed`` flag that all four list-rendering frameworks default to:
|
||||
# ThreatScore, NIS2, CSA, CIS; ENS does not render finding tables). Above
|
||||
# this cap each affected check renders an in-PDF banner
|
||||
# ("Showing first 100 of N failed findings for this check. Use the CSV
|
||||
# or JSON export for the full list") so the reader knows the table is
|
||||
# truncated and where to find the full data.
|
||||
#
|
||||
# Why a cap exists at all:
|
||||
# * ``FindingOutput.transform_api_finding`` is O(N) per finding (Pydantic
|
||||
# v1 validation + nested model construction).
|
||||
# * ReportLab resolves layout per Flowable; thousands of sub-tables make
|
||||
# ``doc.build()`` very slow and grow the PDF unboundedly.
|
||||
# * A human-readable executive/auditor PDF does not need 12,000 rows for
|
||||
# one check; that is forensic data and lives in the CSV/JSON exports.
|
||||
#
|
||||
# Why 100 specifically:
|
||||
# * Covers ~99% of real scans without truncation (most checks emit far
|
||||
# fewer than 100 findings even in enterprise estates).
|
||||
# * Worst-case rendered rows = 100 × ~500 checks = 50k rows across all
|
||||
# frameworks, which keeps RSS bounded and a 5-framework run completes
|
||||
# in minutes instead of hours.
|
||||
#
|
||||
# Override at runtime via ``DJANGO_PDF_MAX_FINDINGS_PER_CHECK``:
|
||||
# * Set to ``0`` to disable the cap entirely (load every finding; only
|
||||
# advisable for small scans).
|
||||
# * Set to a larger value (e.g. ``500``) for forensic detail in big runs;
|
||||
# watch RSS in the Celery worker.
|
||||
MAX_FINDINGS_PER_CHECK = int(os.environ.get("DJANGO_PDF_MAX_FINDINGS_PER_CHECK", "100"))
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Base colors
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
from celery.utils.log import get_task_logger
|
||||
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE
|
||||
from django.db.models import Count, F, Q, Window
|
||||
from django.db.models.functions import RowNumber
|
||||
from tasks.jobs.reports.config import MAX_FINDINGS_PER_CHECK
|
||||
from django.db.models import Count, Q
|
||||
|
||||
from api.db_router import READ_REPLICA_ALIAS
|
||||
from api.db_utils import rls_transaction
|
||||
@@ -156,8 +154,6 @@ def _load_findings_for_requirement_checks(
|
||||
check_ids: list[str],
|
||||
prowler_provider,
|
||||
findings_cache: dict[str, list[FindingOutput]] | None = None,
|
||||
total_counts_out: dict[str, int] | None = None,
|
||||
only_failed_findings: bool = False,
|
||||
) -> dict[str, list[FindingOutput]]:
|
||||
"""
|
||||
Load findings for specific check IDs on-demand with optional caching.
|
||||
@@ -182,23 +178,6 @@ def _load_findings_for_requirement_checks(
|
||||
prowler_provider: The initialized Prowler provider instance.
|
||||
findings_cache (dict, optional): Cache of already loaded findings.
|
||||
If provided, checks are first looked up in cache before querying database.
|
||||
total_counts_out (dict, optional): If provided, populated with
|
||||
``{check_id: total_findings_in_db}`` BEFORE any per-check cap is
|
||||
applied. Lets callers render a "Showing first N of M" banner for
|
||||
truncated checks. Only populated for ``check_ids`` actually
|
||||
queried (cache hits keep whatever value the caller already had).
|
||||
When ``only_failed_findings=True`` the total is FAIL-only.
|
||||
only_failed_findings (bool): When True, push the ``status=FAIL``
|
||||
filter down into the SQL query so PASS rows are never loaded
|
||||
from the DB nor pydantic-transformed. This matches the
|
||||
``only_failed`` requirement-level filter applied at PDF render
|
||||
time: a requirement marked FAIL because 1/1000 findings failed
|
||||
shouldn't render a table of 999 PASS rows. That hides the
|
||||
actual failure under noise and wastes the per-check cap on
|
||||
irrelevant data. NOTE: the findings cache stores whatever the
|
||||
first caller asked for, so all callers in a single
|
||||
``generate_compliance_reports`` run MUST pass the same flag
|
||||
(which they do: it threads from ``only_failed`` defaults).
|
||||
|
||||
Returns:
|
||||
dict[str, list[FindingOutput]]: Dictionary mapping check_id to list of FindingOutput objects.
|
||||
@@ -243,70 +222,17 @@ def _load_findings_for_requirement_checks(
|
||||
)
|
||||
|
||||
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
|
||||
base_qs = Finding.all_objects.filter(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=scan_id,
|
||||
check_id__in=check_ids_to_load,
|
||||
# Use iterator with chunk_size for memory-efficient streaming
|
||||
# chunk_size controls how many rows Django fetches from DB at once
|
||||
findings_queryset = (
|
||||
Finding.all_objects.filter(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=scan_id,
|
||||
check_id__in=check_ids_to_load,
|
||||
)
|
||||
.order_by("check_id", "uid")
|
||||
.iterator(chunk_size=DJANGO_FINDINGS_BATCH_SIZE)
|
||||
)
|
||||
if only_failed_findings:
|
||||
# Push the FAIL filter down into SQL: DB returns ~N×FAIL
|
||||
# rows instead of N×ALL, and we never spend pydantic CPU on
|
||||
# PASS findings the PDF would never render.
|
||||
base_qs = base_qs.filter(status=StatusChoices.FAIL)
|
||||
|
||||
# Aggregate totals once so we (a) know which checks need capping
|
||||
# and (b) can surface "Showing first N of M" in the PDF banner.
|
||||
# Cheap: a single COUNT grouped by check_id.
|
||||
totals: dict[str, int] = {
|
||||
row["check_id"]: row["total"]
|
||||
for row in base_qs.values("check_id").annotate(total=Count("id"))
|
||||
}
|
||||
if total_counts_out is not None:
|
||||
total_counts_out.update(totals)
|
||||
|
||||
cap = MAX_FINDINGS_PER_CHECK
|
||||
checks_over_cap = (
|
||||
{cid for cid, n in totals.items() if n > cap} if cap > 0 else set()
|
||||
)
|
||||
|
||||
# Use iterator with chunk_size for memory-efficient streaming.
|
||||
# FindingOutput.transform_api_finding (prowler/lib/outputs/finding.py)
|
||||
# reads finding.resources.first() and resource.tags.all() per
|
||||
# finding, which without prefetch generates 2N queries per chunk.
|
||||
# prefetch_related runs once per iterator chunk (Django >=4.1) and
|
||||
# collapses that into a constant 2 extra queries per chunk.
|
||||
if checks_over_cap:
|
||||
# Top-N per check via a window function: PostgreSQL only
|
||||
# materialises ``cap * |checks_over_cap| + sum(uncapped)``
|
||||
# rows, vs the full table scan the previous path did.
|
||||
ranked = base_qs.annotate(
|
||||
rn=Window(
|
||||
expression=RowNumber(),
|
||||
partition_by=[F("check_id")],
|
||||
order_by=F("uid").asc(),
|
||||
)
|
||||
)
|
||||
findings_queryset = (
|
||||
Finding.all_objects.filter(
|
||||
id__in=ranked.filter(rn__lte=cap).values("id")
|
||||
)
|
||||
.prefetch_related("resources", "resources__tags")
|
||||
.order_by("check_id", "uid")
|
||||
.iterator(chunk_size=DJANGO_FINDINGS_BATCH_SIZE)
|
||||
)
|
||||
logger.info(
|
||||
"Per-check cap=%d active for %d checks (max %d each); "
|
||||
"skipping transform for surplus rows",
|
||||
cap,
|
||||
len(checks_over_cap),
|
||||
cap,
|
||||
)
|
||||
else:
|
||||
findings_queryset = (
|
||||
base_qs.prefetch_related("resources", "resources__tags")
|
||||
.order_by("check_id", "uid")
|
||||
.iterator(chunk_size=DJANGO_FINDINGS_BATCH_SIZE)
|
||||
)
|
||||
|
||||
# Pre-initialize empty lists for all check_ids to load
|
||||
# This avoids repeated dict lookups and 'if not in' checks
|
||||
@@ -322,11 +248,7 @@ def _load_findings_for_requirement_checks(
|
||||
findings_count += 1
|
||||
|
||||
logger.info(
|
||||
"Loaded %d findings for %d checks (truncated %d checks total=%d)",
|
||||
findings_count,
|
||||
len(check_ids_to_load),
|
||||
len(checks_over_cap),
|
||||
sum(totals.values()),
|
||||
f"Loaded {findings_count} findings for {len(check_ids_to_load)} checks"
|
||||
)
|
||||
|
||||
# Build result dict using cache references (no data duplication)
|
||||
@@ -336,40 +258,3 @@ def _load_findings_for_requirement_checks(
|
||||
}
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _get_compliance_check_ids(compliance_obj) -> set[str]:
|
||||
"""Return the union of all check_ids referenced by a compliance framework.
|
||||
|
||||
Used by the master report orchestrator to know which checks each
|
||||
framework consumes from the shared ``findings_cache``, so that once a
|
||||
framework finishes the entries no other pending framework needs can be
|
||||
evicted from the cache (PROWLER-1733).
|
||||
|
||||
Args:
|
||||
compliance_obj: A loaded Compliance framework object exposing a
|
||||
``Requirements`` iterable, each requirement carrying ``Checks``.
|
||||
``None`` is treated as "no checks" rather than raising, so the
|
||||
caller can pass ``frameworks_bulk.get(...)`` directly without
|
||||
an extra existence check.
|
||||
|
||||
Returns:
|
||||
Set of check_id strings (empty if ``compliance_obj`` is ``None``).
|
||||
"""
|
||||
if compliance_obj is None:
|
||||
return set()
|
||||
checks: set[str] = set()
|
||||
requirements = getattr(compliance_obj, "Requirements", None) or []
|
||||
try:
|
||||
# Defensive: Mock objects (used in unit tests) return another Mock
|
||||
# for any attribute access, which is truthy but not iterable. Treat
|
||||
# any non-iterable Requirements value as "no checks".
|
||||
for req in requirements:
|
||||
req_checks = getattr(req, "Checks", None) or []
|
||||
try:
|
||||
checks.update(req_checks)
|
||||
except TypeError:
|
||||
continue
|
||||
except TypeError:
|
||||
return set()
|
||||
return checks
|
||||
|
||||
@@ -44,8 +44,6 @@ from api.models import (
|
||||
Finding,
|
||||
Resource,
|
||||
ResourceFindingMapping,
|
||||
ResourceTag,
|
||||
ResourceTagMapping,
|
||||
StateChoices,
|
||||
StatusChoices,
|
||||
)
|
||||
@@ -369,317 +367,6 @@ class TestLoadFindingsForChecks:
|
||||
|
||||
assert result == {}
|
||||
|
||||
def test_prefetch_avoids_n_plus_one(self, tenants_fixture, scans_fixture):
|
||||
"""Loading N findings must NOT execute O(N) extra queries for resources/tags.
|
||||
|
||||
Regression test for PROWLER-1733. ``FindingOutput.transform_api_finding``
|
||||
reads ``finding.resources.first()`` and ``resource.tags.all()`` per
|
||||
finding. Without ``prefetch_related`` that's 2N additional queries;
|
||||
with prefetch it collapses to a small constant per iterator chunk.
|
||||
"""
|
||||
from django.test.utils import CaptureQueriesContext
|
||||
from django.db import connections
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
scan = scans_fixture[0]
|
||||
|
||||
# Build N findings, each linked to one resource that owns 2 tags.
|
||||
N = 20
|
||||
for i in range(N):
|
||||
finding = Finding.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
scan=scan,
|
||||
uid=f"f-prefetch-{i}",
|
||||
check_id="aws_check_prefetch",
|
||||
status=StatusChoices.FAIL,
|
||||
severity=Severity.high,
|
||||
impact=Severity.high,
|
||||
check_metadata={
|
||||
"provider": "aws",
|
||||
"checkid": "aws_check_prefetch",
|
||||
"checktitle": "t",
|
||||
"checktype": [],
|
||||
"servicename": "s",
|
||||
"subservicename": "",
|
||||
"severity": "high",
|
||||
"resourcetype": "r",
|
||||
"description": "",
|
||||
"risk": "",
|
||||
"relatedurl": "",
|
||||
"remediation": {
|
||||
"recommendation": {"text": "", "url": ""},
|
||||
"code": {
|
||||
"nativeiac": "",
|
||||
"terraform": "",
|
||||
"cli": "",
|
||||
"other": "",
|
||||
},
|
||||
},
|
||||
"resourceidtemplate": "",
|
||||
"categories": [],
|
||||
"dependson": [],
|
||||
"relatedto": [],
|
||||
"notes": "",
|
||||
},
|
||||
raw_result={},
|
||||
)
|
||||
resource = Resource.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=scan.provider,
|
||||
uid=f"r-prefetch-{i}",
|
||||
name=f"r-prefetch-{i}",
|
||||
metadata="{}",
|
||||
details="",
|
||||
region="us-east-1",
|
||||
service="s",
|
||||
type="t::r",
|
||||
)
|
||||
ResourceFindingMapping.objects.create(
|
||||
tenant_id=tenant.id, finding=finding, resource=resource
|
||||
)
|
||||
for k in ("env", "owner"):
|
||||
tag, _ = ResourceTag.objects.get_or_create(
|
||||
tenant_id=tenant.id, key=k, value=f"v-{i}-{k}"
|
||||
)
|
||||
ResourceTagMapping.objects.create(
|
||||
tenant_id=tenant.id, resource=resource, tag=tag
|
||||
)
|
||||
|
||||
mock_provider = Mock()
|
||||
mock_provider.type = "aws"
|
||||
mock_provider.identity.account = "test"
|
||||
|
||||
# Patch transform_api_finding to a no-op so the test isolates queries
|
||||
# to the queryset/prefetch path (transform itself is exercised by
|
||||
# the integration tests above and not by this regression check).
|
||||
with patch(
|
||||
"tasks.jobs.threatscore_utils.FindingOutput.transform_api_finding",
|
||||
side_effect=lambda model, provider: Mock(check_id=model.check_id),
|
||||
):
|
||||
with CaptureQueriesContext(
|
||||
connections["default_read_replica"]
|
||||
if "default_read_replica" in connections.databases
|
||||
else connections["default"]
|
||||
) as ctx:
|
||||
_load_findings_for_requirement_checks(
|
||||
str(tenant.id),
|
||||
str(scan.id),
|
||||
["aws_check_prefetch"],
|
||||
mock_provider,
|
||||
)
|
||||
|
||||
# Expected: a small constant number of queries irrespective of N.
|
||||
# Pre-fix this would be ~1 + 2*N. We give some slack for RLS SET
|
||||
# LOCAL statements that the rls_transaction emits.
|
||||
assert len(ctx.captured_queries) < N, (
|
||||
f"Expected O(1) queries with prefetch_related; got "
|
||||
f"{len(ctx.captured_queries)} for N={N} (N+1 regression?)"
|
||||
)
|
||||
|
||||
def test_max_findings_per_check_cap(self, tenants_fixture, scans_fixture):
|
||||
"""When a check exceeds ``MAX_FINDINGS_PER_CHECK``, only ``cap`` rows
|
||||
are loaded AND ``total_counts_out`` reports the pre-cap total.
|
||||
|
||||
Guards the PROWLER-1733 truncation knob: prevents both runaway memory
|
||||
and silent data loss in the PDF (the banner relies on knowing the
|
||||
real total).
|
||||
"""
|
||||
from unittest.mock import patch as _patch
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
scan = scans_fixture[0]
|
||||
|
||||
# Create 12 findings for a single check; cap to 5.
|
||||
check_id = "aws_check_cap_test"
|
||||
for i in range(12):
|
||||
finding = Finding.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
scan=scan,
|
||||
uid=f"f-cap-{i:02d}",
|
||||
check_id=check_id,
|
||||
status=StatusChoices.FAIL,
|
||||
severity=Severity.high,
|
||||
impact=Severity.high,
|
||||
check_metadata={},
|
||||
raw_result={},
|
||||
)
|
||||
resource = Resource.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=scan.provider,
|
||||
uid=f"r-cap-{i:02d}",
|
||||
name=f"r-cap-{i:02d}",
|
||||
metadata="{}",
|
||||
details="",
|
||||
region="us-east-1",
|
||||
service="s",
|
||||
type="t::r",
|
||||
)
|
||||
ResourceFindingMapping.objects.create(
|
||||
tenant_id=tenant.id, finding=finding, resource=resource
|
||||
)
|
||||
|
||||
mock_provider = Mock(type="aws")
|
||||
mock_provider.identity.account = "test"
|
||||
|
||||
totals: dict = {}
|
||||
# Patch the cap to a small value AND skip the heavy transform so we
|
||||
# only assert on row counts and totals.
|
||||
with (
|
||||
_patch("tasks.jobs.threatscore_utils.MAX_FINDINGS_PER_CHECK", 5),
|
||||
_patch(
|
||||
"tasks.jobs.threatscore_utils.FindingOutput.transform_api_finding",
|
||||
side_effect=lambda model, provider: Mock(check_id=model.check_id),
|
||||
),
|
||||
):
|
||||
result = _load_findings_for_requirement_checks(
|
||||
str(tenant.id),
|
||||
str(scan.id),
|
||||
[check_id],
|
||||
mock_provider,
|
||||
total_counts_out=totals,
|
||||
)
|
||||
|
||||
assert len(result[check_id]) == 5, (
|
||||
f"cap=5 should yield exactly 5 loaded findings, got {len(result[check_id])}"
|
||||
)
|
||||
assert totals[check_id] == 12, (
|
||||
f"total_counts_out should report the pre-cap total (12), got {totals[check_id]}"
|
||||
)
|
||||
|
||||
def test_only_failed_findings_pushes_down_to_sql(
|
||||
self, tenants_fixture, scans_fixture
|
||||
):
|
||||
"""When ``only_failed_findings=True``, PASS rows are excluded by the
|
||||
DB filter, not just visually hidden afterwards.
|
||||
|
||||
Regression for the consistency fix: previously the requirement-level
|
||||
``only_failed`` flag filtered which requirements appeared, but inside
|
||||
each rendered requirement the table still showed PASS rows mixed
|
||||
with FAIL, which combined with ``MAX_FINDINGS_PER_CHECK`` could
|
||||
truncate to 1000 PASS findings and hide the actual failure.
|
||||
"""
|
||||
from unittest.mock import patch as _patch
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
scan = scans_fixture[0]
|
||||
check_id = "aws_check_only_failed_test"
|
||||
|
||||
# Mix PASS and FAIL so the filter has something to drop.
|
||||
for i in range(6):
|
||||
status = StatusChoices.FAIL if i % 2 == 0 else StatusChoices.PASS
|
||||
finding = Finding.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
scan=scan,
|
||||
uid=f"f-of-{i:02d}",
|
||||
check_id=check_id,
|
||||
status=status,
|
||||
severity=Severity.high,
|
||||
impact=Severity.high,
|
||||
check_metadata={},
|
||||
raw_result={},
|
||||
)
|
||||
resource = Resource.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=scan.provider,
|
||||
uid=f"r-of-{i:02d}",
|
||||
name=f"r-of-{i:02d}",
|
||||
metadata="{}",
|
||||
details="",
|
||||
region="us-east-1",
|
||||
service="s",
|
||||
type="t::r",
|
||||
)
|
||||
ResourceFindingMapping.objects.create(
|
||||
tenant_id=tenant.id, finding=finding, resource=resource
|
||||
)
|
||||
|
||||
mock_provider = Mock(type="aws")
|
||||
mock_provider.identity.account = "test"
|
||||
|
||||
totals: dict = {}
|
||||
with _patch(
|
||||
"tasks.jobs.threatscore_utils.FindingOutput.transform_api_finding",
|
||||
side_effect=lambda model, provider: Mock(
|
||||
check_id=model.check_id, status=model.status
|
||||
),
|
||||
):
|
||||
result = _load_findings_for_requirement_checks(
|
||||
str(tenant.id),
|
||||
str(scan.id),
|
||||
[check_id],
|
||||
mock_provider,
|
||||
total_counts_out=totals,
|
||||
only_failed_findings=True,
|
||||
)
|
||||
|
||||
# 3 FAIL + 3 PASS in DB; FAIL-only filter should load just 3.
|
||||
loaded = result[check_id]
|
||||
assert len(loaded) == 3, f"expected 3 FAIL findings, got {len(loaded)}"
|
||||
statuses = {getattr(f, "status", None) for f in loaded}
|
||||
assert statuses == {StatusChoices.FAIL}, (
|
||||
f"expected all loaded findings to be FAIL; got statuses {statuses}"
|
||||
)
|
||||
# total_counts must reflect the FAIL-only total, not the global total.
|
||||
assert totals[check_id] == 3, (
|
||||
f"total_counts should be FAIL-only (3), got {totals[check_id]}"
|
||||
)
|
||||
|
||||
def test_max_findings_per_check_disabled(self, tenants_fixture, scans_fixture):
|
||||
"""``MAX_FINDINGS_PER_CHECK=0`` disables the cap; load all rows."""
|
||||
from unittest.mock import patch as _patch
|
||||
|
||||
tenant = tenants_fixture[0]
|
||||
scan = scans_fixture[0]
|
||||
|
||||
check_id = "aws_check_uncapped"
|
||||
for i in range(8):
|
||||
f = Finding.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
scan=scan,
|
||||
uid=f"f-unc-{i:02d}",
|
||||
check_id=check_id,
|
||||
status=StatusChoices.FAIL,
|
||||
severity=Severity.high,
|
||||
impact=Severity.high,
|
||||
check_metadata={},
|
||||
raw_result={},
|
||||
)
|
||||
r = Resource.objects.create(
|
||||
tenant_id=tenant.id,
|
||||
provider=scan.provider,
|
||||
uid=f"r-unc-{i:02d}",
|
||||
name=f"r-unc-{i:02d}",
|
||||
metadata="{}",
|
||||
details="",
|
||||
region="us-east-1",
|
||||
service="s",
|
||||
type="t::r",
|
||||
)
|
||||
ResourceFindingMapping.objects.create(
|
||||
tenant_id=tenant.id, finding=f, resource=r
|
||||
)
|
||||
|
||||
mock_provider = Mock(type="aws")
|
||||
mock_provider.identity.account = "test"
|
||||
totals: dict = {}
|
||||
with (
|
||||
_patch("tasks.jobs.threatscore_utils.MAX_FINDINGS_PER_CHECK", 0),
|
||||
_patch(
|
||||
"tasks.jobs.threatscore_utils.FindingOutput.transform_api_finding",
|
||||
side_effect=lambda model, provider: Mock(check_id=model.check_id),
|
||||
),
|
||||
):
|
||||
result = _load_findings_for_requirement_checks(
|
||||
str(tenant.id),
|
||||
str(scan.id),
|
||||
[check_id],
|
||||
mock_provider,
|
||||
total_counts_out=totals,
|
||||
)
|
||||
|
||||
assert len(result[check_id]) == 8
|
||||
assert totals[check_id] == 8
|
||||
|
||||
|
||||
class TestCleanupStaleTmpOutputDirectories:
|
||||
"""Unit tests for opportunistic stale cleanup under tmp output root."""
|
||||
@@ -1168,181 +855,6 @@ class TestGenerateComplianceReportsOptimized:
|
||||
assert result["cis"] == {"upload": False, "path": ""}
|
||||
mock_cis.assert_not_called()
|
||||
|
||||
@patch("api.utils.initialize_prowler_provider")
|
||||
@patch("tasks.jobs.report.rmtree")
|
||||
@patch("tasks.jobs.report._upload_to_s3")
|
||||
@patch("tasks.jobs.report.generate_cis_report")
|
||||
@patch("tasks.jobs.report.generate_csa_report")
|
||||
@patch("tasks.jobs.report.generate_nis2_report")
|
||||
@patch("tasks.jobs.report.generate_ens_report")
|
||||
@patch("tasks.jobs.report.generate_threatscore_report")
|
||||
@patch("tasks.jobs.report._generate_compliance_output_directory")
|
||||
@patch("tasks.jobs.report._aggregate_requirement_statistics_from_database")
|
||||
@patch("tasks.jobs.report.Compliance.get_bulk")
|
||||
@patch("tasks.jobs.report.Provider.objects.get")
|
||||
@patch("tasks.jobs.report.ScanSummary.objects.filter")
|
||||
def test_findings_cache_eviction_after_framework(
|
||||
self,
|
||||
mock_scan_summary_filter,
|
||||
mock_provider_get,
|
||||
mock_get_bulk,
|
||||
mock_aggregate_stats,
|
||||
mock_generate_output_dir,
|
||||
mock_threatscore,
|
||||
mock_ens,
|
||||
mock_nis2,
|
||||
mock_csa,
|
||||
mock_cis,
|
||||
mock_upload_to_s3,
|
||||
mock_rmtree,
|
||||
mock_init_provider,
|
||||
):
|
||||
"""After each framework finishes, exclusive entries are evicted.
|
||||
|
||||
Threat scenario for PROWLER-1733: the shared ``findings_cache`` used
|
||||
to grow monotonically through all 5 frameworks. With the new
|
||||
eviction logic, check_ids only used by ThreatScore are dropped when
|
||||
ThreatScore finishes, before ENS runs.
|
||||
"""
|
||||
from types import SimpleNamespace
|
||||
from tasks.jobs import report as report_mod
|
||||
|
||||
mock_scan_summary_filter.return_value.exists.return_value = True
|
||||
mock_provider_get.return_value = Mock(uid="provider-uid", provider="aws")
|
||||
# ThreatScore consumes {tsc_only, shared}; ENS consumes {ens_only,
|
||||
# shared}. After ThreatScore evicts, tsc_only must be gone but
|
||||
# shared and ens_only must remain.
|
||||
mock_get_bulk.return_value = {
|
||||
"prowler_threatscore_aws": SimpleNamespace(
|
||||
Requirements=[SimpleNamespace(Checks=["tsc_only", "shared"])]
|
||||
),
|
||||
"ens_rd2022_aws": SimpleNamespace(
|
||||
Requirements=[SimpleNamespace(Checks=["ens_only", "shared"])]
|
||||
),
|
||||
}
|
||||
mock_aggregate_stats.return_value = {}
|
||||
mock_generate_output_dir.return_value = "/tmp/tenant/scan/x/prowler-out"
|
||||
mock_upload_to_s3.return_value = "s3://bucket/tenant/scan/x/report.pdf"
|
||||
mock_init_provider.return_value = Mock(name="prowler_provider")
|
||||
|
||||
# Seed the cache as if both frameworks had already loaded their
|
||||
# findings. We mutate it indirectly: each generator wrapper is a
|
||||
# Mock: make ThreatScore populate the cache, and have ENS observe
|
||||
# the state at call time so we can introspect post-eviction.
|
||||
observed_state: dict = {}
|
||||
|
||||
def _threatscore_side_effect(**kwargs):
|
||||
cache = kwargs["findings_cache"]
|
||||
cache["tsc_only"] = ["tsc-finding"]
|
||||
cache["shared"] = ["shared-finding"]
|
||||
|
||||
def _ens_side_effect(**kwargs):
|
||||
# ENS runs AFTER threatscore's _evict_after_framework("threatscore").
|
||||
observed_state["cache_keys_when_ens_runs"] = set(
|
||||
kwargs["findings_cache"].keys()
|
||||
)
|
||||
kwargs["findings_cache"]["ens_only"] = ["ens-finding"]
|
||||
|
||||
mock_threatscore.side_effect = _threatscore_side_effect
|
||||
mock_ens.side_effect = _ens_side_effect
|
||||
|
||||
report_mod.generate_compliance_reports(
|
||||
tenant_id=str(uuid.uuid4()),
|
||||
scan_id=str(uuid.uuid4()),
|
||||
provider_id=str(uuid.uuid4()),
|
||||
generate_threatscore=True,
|
||||
generate_ens=True,
|
||||
generate_nis2=False,
|
||||
generate_csa=False,
|
||||
generate_cis=False,
|
||||
)
|
||||
|
||||
# ``tsc_only`` was exclusive to ThreatScore → evicted before ENS ran.
|
||||
# ``shared`` is still pending for ENS → must remain.
|
||||
assert "tsc_only" not in observed_state["cache_keys_when_ens_runs"], (
|
||||
"tsc_only should have been evicted before ENS ran"
|
||||
)
|
||||
assert "shared" in observed_state["cache_keys_when_ens_runs"], (
|
||||
"shared must remain in cache because ENS still needs it"
|
||||
)
|
||||
|
||||
@patch("api.utils.initialize_prowler_provider")
|
||||
@patch("tasks.jobs.report.rmtree")
|
||||
@patch("tasks.jobs.report._upload_to_s3")
|
||||
@patch("tasks.jobs.report.generate_cis_report")
|
||||
@patch("tasks.jobs.report.generate_csa_report")
|
||||
@patch("tasks.jobs.report.generate_nis2_report")
|
||||
@patch("tasks.jobs.report.generate_ens_report")
|
||||
@patch("tasks.jobs.report.generate_threatscore_report")
|
||||
@patch("tasks.jobs.report._generate_compliance_output_directory")
|
||||
@patch("tasks.jobs.report._aggregate_requirement_statistics_from_database")
|
||||
@patch("tasks.jobs.report.Compliance.get_bulk")
|
||||
@patch("tasks.jobs.report.Provider.objects.get")
|
||||
@patch("tasks.jobs.report.ScanSummary.objects.filter")
|
||||
def test_prowler_provider_initialized_once(
|
||||
self,
|
||||
mock_scan_summary_filter,
|
||||
mock_provider_get,
|
||||
mock_get_bulk,
|
||||
mock_aggregate_stats,
|
||||
mock_generate_output_dir,
|
||||
mock_threatscore,
|
||||
mock_ens,
|
||||
mock_nis2,
|
||||
mock_csa,
|
||||
mock_cis,
|
||||
mock_upload_to_s3,
|
||||
mock_rmtree,
|
||||
mock_init_provider,
|
||||
):
|
||||
"""``initialize_prowler_provider`` must be called exactly once for
|
||||
the whole batch (PROWLER-1733). Previously each generator re-init'd
|
||||
the SDK provider in ``_load_compliance_data`` → 5 inits per scan.
|
||||
"""
|
||||
mock_scan_summary_filter.return_value.exists.return_value = True
|
||||
mock_provider_get.return_value = Mock(uid="provider-uid", provider="aws")
|
||||
# CIS variant discovery needs at least one cis_* key.
|
||||
mock_get_bulk.return_value = {"cis_6.0_aws": Mock()}
|
||||
mock_aggregate_stats.return_value = {}
|
||||
mock_generate_output_dir.return_value = "/tmp/tenant/scan/x/prowler-out"
|
||||
mock_upload_to_s3.return_value = "s3://bucket/tenant/scan/x/report.pdf"
|
||||
mock_init_provider.return_value = Mock(name="prowler_provider")
|
||||
|
||||
generate_compliance_reports(
|
||||
tenant_id=str(uuid.uuid4()),
|
||||
scan_id=str(uuid.uuid4()),
|
||||
provider_id=str(uuid.uuid4()),
|
||||
generate_threatscore=True,
|
||||
generate_ens=True,
|
||||
generate_nis2=True,
|
||||
generate_csa=True,
|
||||
generate_cis=True,
|
||||
)
|
||||
|
||||
# All 5 wrappers were invoked once each…
|
||||
mock_threatscore.assert_called_once()
|
||||
mock_ens.assert_called_once()
|
||||
mock_nis2.assert_called_once()
|
||||
mock_csa.assert_called_once()
|
||||
mock_cis.assert_called_once()
|
||||
# …but the SDK provider was initialized only once.
|
||||
assert mock_init_provider.call_count == 1, (
|
||||
f"expected 1 init, got {mock_init_provider.call_count} "
|
||||
f"(prowler_provider must be shared across reports)"
|
||||
)
|
||||
|
||||
# The shared instance must reach every wrapper as kwargs.
|
||||
shared = mock_init_provider.return_value
|
||||
for mock_wrapper in (
|
||||
mock_threatscore,
|
||||
mock_ens,
|
||||
mock_nis2,
|
||||
mock_csa,
|
||||
mock_cis,
|
||||
):
|
||||
_, call_kwargs = mock_wrapper.call_args
|
||||
assert call_kwargs.get("prowler_provider") is shared
|
||||
|
||||
@patch("tasks.jobs.report.rmtree")
|
||||
@patch("tasks.jobs.report._upload_to_s3")
|
||||
@patch("tasks.jobs.report.generate_threatscore_report")
|
||||
|
||||
@@ -1269,48 +1269,6 @@ class TestComponentEdgeCases:
|
||||
# Should be a LongTable for large datasets
|
||||
assert isinstance(table, LongTable)
|
||||
|
||||
def test_zebra_uses_rowbackgrounds_not_per_row_background(self, monkeypatch):
|
||||
"""The styles list must contain exactly one ROWBACKGROUNDS entry
|
||||
regardless of row count, never N per-row BACKGROUND entries.
|
||||
"""
|
||||
captured: dict = {}
|
||||
|
||||
# Capture the list passed to TableStyle. create_data_table builds a
|
||||
# list of style tuples and wraps it in a TableStyle exactly once;
|
||||
# by patching TableStyle we intercept that list.
|
||||
import tasks.jobs.reports.components as comp_mod
|
||||
|
||||
original_table_style = comp_mod.TableStyle
|
||||
|
||||
def _capture_table_style(style_list):
|
||||
captured["styles"] = list(style_list)
|
||||
return original_table_style(style_list)
|
||||
|
||||
monkeypatch.setattr(comp_mod, "TableStyle", _capture_table_style)
|
||||
|
||||
data = [{"name": f"Item {i}"} for i in range(60)]
|
||||
columns = [ColumnConfig("Name", 2 * inch, "name")]
|
||||
comp_mod.create_data_table(data, columns, alternate_rows=True)
|
||||
|
||||
styles = captured["styles"]
|
||||
# Count by command name.
|
||||
names = [s[0] for s in styles if isinstance(s, tuple) and s]
|
||||
# Exactly one ROWBACKGROUNDS entry.
|
||||
assert names.count("ROWBACKGROUNDS") == 1
|
||||
# Zero per-row BACKGROUND entries on data rows. (The header row
|
||||
# BACKGROUND command is intentional and lives at coords (0,0)/(-1,0).)
|
||||
data_row_bg = [
|
||||
s
|
||||
for s in styles
|
||||
if isinstance(s, tuple)
|
||||
and s[0] == "BACKGROUND"
|
||||
and not (s[1] == (0, 0) and s[2] == (-1, 0))
|
||||
]
|
||||
assert data_row_bg == [], (
|
||||
f"expected no per-row BACKGROUND entries on data rows; "
|
||||
f"got {len(data_row_bg)}"
|
||||
)
|
||||
|
||||
def test_create_risk_component_zero_values(self):
|
||||
"""Test risk component with zero values."""
|
||||
component = create_risk_component(risk_level=0, weight=0, score=0)
|
||||
@@ -1386,194 +1344,3 @@ class TestFrameworkConfigEdgeCases:
|
||||
assert get_framework_config("my_custom_threatscore_compliance") is not None
|
||||
assert get_framework_config("ens_something_else") is not None
|
||||
assert get_framework_config("nis2_gcp") is not None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Findings Table Chunking Tests (PROWLER-1733)
|
||||
# =============================================================================
|
||||
#
|
||||
# These tests guard the OOM-prevention behaviour added in PROWLER-1733:
|
||||
# ``_create_findings_tables`` must split a list of findings into multiple
|
||||
# small sub-tables instead of producing one giant Table, which would force
|
||||
# ReportLab to resolve layout for all rows at once and OOM the worker on
|
||||
# scans with thousands of findings per check.
|
||||
|
||||
|
||||
class _DummyMetadata:
|
||||
"""Lightweight stand-in for FindingOutput.metadata used in chunking tests."""
|
||||
|
||||
def __init__(self, check_title: str = "Title", severity: str = "high"):
|
||||
self.CheckTitle = check_title
|
||||
self.Severity = severity
|
||||
|
||||
|
||||
class _DummyFinding:
|
||||
"""Lightweight stand-in for FindingOutput used in chunking tests.
|
||||
|
||||
The chunking code only reads a small set of attributes via ``getattr``,
|
||||
so a duck-typed object is enough and lets the tests run without touching
|
||||
the DB or pydantic deserialisation.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
check_id: str = "aws_check",
|
||||
resource_name: str = "res-1",
|
||||
resource_uid: str = "",
|
||||
status: str = "FAIL",
|
||||
region: str = "us-east-1",
|
||||
with_metadata: bool = True,
|
||||
):
|
||||
self.check_id = check_id
|
||||
self.resource_name = resource_name
|
||||
self.resource_uid = resource_uid
|
||||
self.status = status
|
||||
self.region = region
|
||||
if with_metadata:
|
||||
self.metadata = _DummyMetadata()
|
||||
else:
|
||||
self.metadata = None
|
||||
|
||||
|
||||
def _make_concrete_generator():
|
||||
"""Return a minimal concrete subclass of BaseComplianceReportGenerator."""
|
||||
|
||||
class _Concrete(BaseComplianceReportGenerator):
|
||||
def create_executive_summary(self, data):
|
||||
return []
|
||||
|
||||
def create_charts_section(self, data):
|
||||
return []
|
||||
|
||||
def create_requirements_index(self, data):
|
||||
return []
|
||||
|
||||
return _Concrete(FrameworkConfig(name="test", display_name="Test"))
|
||||
|
||||
|
||||
class TestFindingsTableChunking:
|
||||
"""Tests for ``_create_findings_tables`` (PROWLER-1733)."""
|
||||
|
||||
def test_chunking_produces_expected_number_of_subtables(self):
|
||||
"""5000 findings @ chunk_size=300 → 17 sub-tables + 16 spacers."""
|
||||
generator = _make_concrete_generator()
|
||||
findings = [_DummyFinding(check_id="c1") for _ in range(5000)]
|
||||
|
||||
flowables = generator._create_findings_tables(findings, chunk_size=300)
|
||||
|
||||
tables = [f for f in flowables if isinstance(f, (Table, LongTable))]
|
||||
spacers = [f for f in flowables if isinstance(f, Spacer)]
|
||||
# ceil(5000 / 300) == 17
|
||||
assert len(tables) == 17
|
||||
# Spacer between every pair of contiguous tables, not after the last
|
||||
assert len(spacers) == 16
|
||||
|
||||
def test_chunk_size_param_overrides_default(self):
|
||||
"""250 findings @ chunk_size=100 → 3 sub-tables."""
|
||||
generator = _make_concrete_generator()
|
||||
findings = [_DummyFinding(check_id="c2") for _ in range(250)]
|
||||
|
||||
flowables = generator._create_findings_tables(findings, chunk_size=100)
|
||||
tables = [f for f in flowables if isinstance(f, (Table, LongTable))]
|
||||
assert len(tables) == 3
|
||||
|
||||
def test_empty_findings_returns_empty_list(self):
|
||||
"""No findings → no flowables. Callers can extend(...) safely."""
|
||||
generator = _make_concrete_generator()
|
||||
assert generator._create_findings_tables([]) == []
|
||||
|
||||
def test_single_chunk_has_no_spacer(self):
|
||||
"""A single sub-table must not emit a trailing spacer."""
|
||||
generator = _make_concrete_generator()
|
||||
findings = [_DummyFinding(check_id="c3") for _ in range(10)]
|
||||
|
||||
flowables = generator._create_findings_tables(findings, chunk_size=300)
|
||||
assert len(flowables) == 1
|
||||
assert isinstance(flowables[0], (Table, LongTable))
|
||||
|
||||
def test_malformed_finding_is_skipped(self):
|
||||
"""A broken finding must not abort the report; it is logged and skipped."""
|
||||
generator = _make_concrete_generator()
|
||||
|
||||
class _Broken:
|
||||
# No attributes at all; getattr() defaults will mostly cope, but
|
||||
# we force an explicit error by making the metadata attribute
|
||||
# itself raise on access.
|
||||
@property
|
||||
def metadata(self):
|
||||
raise RuntimeError("boom")
|
||||
|
||||
check_id = "broken"
|
||||
|
||||
findings = [
|
||||
_DummyFinding(check_id="c4"),
|
||||
_Broken(),
|
||||
_DummyFinding(check_id="c4"),
|
||||
]
|
||||
flowables = generator._create_findings_tables(findings, chunk_size=300)
|
||||
# Two good rows → one sub-table containing them; the broken one is
|
||||
# logged and dropped, not propagated.
|
||||
tables = [f for f in flowables if isinstance(f, (Table, LongTable))]
|
||||
assert len(tables) == 1
|
||||
|
||||
def test_create_findings_table_alias_returns_first_chunk(self):
|
||||
"""The deprecated alias must keep returning a single Table flowable."""
|
||||
generator = _make_concrete_generator()
|
||||
findings = [_DummyFinding(check_id="c5") for _ in range(700)]
|
||||
|
||||
first = generator._create_findings_table(findings)
|
||||
assert isinstance(first, (Table, LongTable))
|
||||
|
||||
def test_create_findings_table_alias_empty(self):
|
||||
"""Alias on empty input returns an empty (header-only) Table, not None."""
|
||||
generator = _make_concrete_generator()
|
||||
result = generator._create_findings_table([])
|
||||
# The legacy alias never returned None; an empty header-only table
|
||||
# is a strict superset of that contract.
|
||||
assert isinstance(result, (Table, LongTable))
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Logging Context Manager Tests (PROWLER-1733)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class TestLogPhaseContextManager:
|
||||
"""Tests for ``_log_phase`` (PROWLER-1733).
|
||||
|
||||
The context manager emits structured ``phase_start`` / ``phase_end``
|
||||
logs with ``scan_id``, ``framework`` and ``elapsed_s``, so Datadog/
|
||||
CloudWatch queries can pivot by scan and find the slow section.
|
||||
"""
|
||||
|
||||
def test_emits_start_and_end_with_elapsed_and_rss(self, caplog):
|
||||
from tasks.jobs.reports.base import _log_phase
|
||||
|
||||
caplog.set_level("INFO", logger="tasks.jobs.reports.base")
|
||||
with _log_phase("unit_test_phase", scan_id="s-1", framework="Test FW"):
|
||||
pass
|
||||
|
||||
messages = [r.getMessage() for r in caplog.records]
|
||||
starts = [m for m in messages if "phase_start" in m]
|
||||
ends = [m for m in messages if "phase_end" in m]
|
||||
|
||||
assert len(starts) == 1 and len(ends) == 1
|
||||
assert "phase=unit_test_phase" in starts[0]
|
||||
assert "scan_id=s-1" in starts[0]
|
||||
assert "framework=Test FW" in starts[0]
|
||||
assert "elapsed_s=" in ends[0]
|
||||
assert "rss_kb=" in ends[0]
|
||||
assert "delta_rss_kb=" in ends[0]
|
||||
|
||||
def test_failure_logs_phase_failed_and_reraises(self, caplog):
|
||||
from tasks.jobs.reports.base import _log_phase
|
||||
|
||||
caplog.set_level("INFO", logger="tasks.jobs.reports.base")
|
||||
with pytest.raises(RuntimeError, match="boom"):
|
||||
with _log_phase("failing_phase", scan_id="s-2", framework="FW"):
|
||||
raise RuntimeError("boom")
|
||||
|
||||
messages = [r.getMessage() for r in caplog.records]
|
||||
assert any("phase_failed" in m and "failing_phase" in m for m in messages)
|
||||
# No phase_end on the failure path.
|
||||
assert not any("phase_end" in m for m in messages)
|
||||
|
||||
@@ -10,10 +10,10 @@ This repository contains the Prowler Open Source documentation powered by [Mintl
|
||||
|
||||
## Local Development
|
||||
|
||||
Install the [Mintlify CLI](https://www.npmjs.com/package/mint) to preview documentation changes locally:
|
||||
Install a reviewed version of the [Mintlify CLI](https://www.npmjs.com/package/mint) to preview documentation changes locally:
|
||||
|
||||
```bash
|
||||
npm i -g mint
|
||||
npm install --global mint@4.2.560
|
||||
```
|
||||
|
||||
Run the following command at the root of your documentation (where `mint.json` is located):
|
||||
|
||||
@@ -28,7 +28,7 @@ This includes the [AGENTS.md](https://github.com/prowler-cloud/prowler/blob/mast
|
||||
<Steps>
|
||||
<Step title="Install Mintlify CLI">
|
||||
```bash
|
||||
npm i -g mint
|
||||
npm install --global mint@4.2.560
|
||||
```
|
||||
For detailed instructions, check the [Mintlify documentation](https://www.mintlify.com/docs/installation).
|
||||
</Step>
|
||||
|
||||
@@ -332,6 +332,13 @@
|
||||
"user-guide/providers/vercel/getting-started-vercel",
|
||||
"user-guide/providers/vercel/authentication"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "Okta",
|
||||
"pages": [
|
||||
"user-guide/providers/okta/getting-started-okta",
|
||||
"user-guide/providers/okta/authentication"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
@@ -10,7 +10,7 @@ Complete reference guide for all tools available in the Prowler MCP Server. Tool
|
||||
|----------|------------|------------------------|
|
||||
| Prowler Hub | 10 tools | No |
|
||||
| Prowler Documentation | 2 tools | No |
|
||||
| Prowler Cloud/App | 29 tools | Yes |
|
||||
| Prowler Cloud/App | 32 tools | Yes |
|
||||
|
||||
## Tool Naming Convention
|
||||
|
||||
@@ -36,6 +36,14 @@ Tools for searching, viewing, and analyzing security findings across all cloud p
|
||||
- **`prowler_app_get_finding_details`** - Get comprehensive details about a specific finding including remediation guidance, check metadata, and resource relationships
|
||||
- **`prowler_app_get_findings_overview`** - Get aggregate statistics and trends about security findings as a markdown report
|
||||
|
||||
### Finding Groups Management
|
||||
|
||||
Tools for listing finding groups aggregated by check ID, viewing complete group counters, and drilling down into affected resources.
|
||||
|
||||
- **`prowler_app_list_finding_groups`** - List latest or historical finding groups with filters for provider, region, service, resource, category, check, severity, status, muted state, delta, date range, and sorting
|
||||
- **`prowler_app_get_finding_group_details`** - Get complete details for a specific finding group including counters, description, timestamps, and impacted providers
|
||||
- **`prowler_app_list_finding_group_resources`** - List actionable unmuted resources affected by a finding group by default, including nested resource and provider data plus the `finding_id` for remediation details. Set `include_muted` to include suppressed resources
|
||||
|
||||
### Provider Management
|
||||
|
||||
Tools for managing cloud provider connections in Prowler.
|
||||
|
||||
@@ -44,13 +44,21 @@ Choose the configuration based on your deployment:
|
||||
|
||||
<Tab title="Generic without Native HTTP Support">
|
||||
**Configuration:**
|
||||
<Warning>
|
||||
Avoid configuring MCP clients to run `npx mcp-remote` directly. `npx` can download and execute a new package version on each run. Install a reviewed version of `mcp-remote` in a dedicated local workspace, then point the MCP client to the installed binary.
|
||||
</Warning>
|
||||
```bash
|
||||
mkdir -p ~/.local/share/prowler-mcp-bridge
|
||||
cd ~/.local/share/prowler-mcp-bridge
|
||||
npm init -y
|
||||
npm install --save-exact mcp-remote@0.1.38
|
||||
```
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"prowler": {
|
||||
"command": "npx",
|
||||
"command": "/absolute/path/to/.local/share/prowler-mcp-bridge/node_modules/.bin/mcp-remote",
|
||||
"args": [
|
||||
"mcp-remote",
|
||||
"https://mcp.prowler.com/mcp", // or your self-hosted Prowler MCP Server URL
|
||||
"--header",
|
||||
"Authorization: Bearer ${PROWLER_APP_API_KEY}"
|
||||
@@ -72,14 +80,20 @@ Choose the configuration based on your deployment:
|
||||
2. Go to "Developer" tab
|
||||
3. Click in "Edit Config" button
|
||||
4. Edit the `claude_desktop_config.json` file with your favorite editor
|
||||
5. Add the following configuration:
|
||||
5. Install a reviewed version of `mcp-remote` in a dedicated local workspace:
|
||||
```bash
|
||||
mkdir -p ~/.local/share/prowler-mcp-bridge
|
||||
cd ~/.local/share/prowler-mcp-bridge
|
||||
npm init -y
|
||||
npm install --save-exact mcp-remote@0.1.38
|
||||
```
|
||||
6. Add the following configuration:
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"prowler": {
|
||||
"command": "npx",
|
||||
"command": "/absolute/path/to/.local/share/prowler-mcp-bridge/node_modules/.bin/mcp-remote",
|
||||
"args": [
|
||||
"mcp-remote",
|
||||
"https://mcp.prowler.com/mcp",
|
||||
"--header",
|
||||
"Authorization: Bearer ${PROWLER_APP_API_KEY}"
|
||||
|
||||
@@ -38,7 +38,7 @@ Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detai
|
||||
|
||||
- `git` installed.
|
||||
- `poetry` installed: [poetry installation](https://python-poetry.org/docs/#installation).
|
||||
- `npm` installed: [npm installation](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm).
|
||||
- `pnpm` installed through [Corepack](https://pnpm.io/installation#using-corepack) or the standalone [pnpm installation](https://pnpm.io/installation).
|
||||
- `Docker Compose` installed: https://docs.docker.com/compose/install/.
|
||||
|
||||
<Warning>
|
||||
@@ -97,9 +97,11 @@ Refer to the [Prowler App Tutorial](/user-guide/tutorials/prowler-app) for detai
|
||||
```bash
|
||||
git clone https://github.com/prowler-cloud/prowler \
|
||||
cd prowler/ui \
|
||||
npm install \
|
||||
npm run build \
|
||||
npm start
|
||||
corepack enable \
|
||||
corepack install \
|
||||
pnpm install --frozen-lockfile \
|
||||
pnpm run build \
|
||||
pnpm start
|
||||
```
|
||||
|
||||
> Enjoy Prowler App at http://localhost:3000 by signing up with your email and password.
|
||||
|
||||
@@ -47,11 +47,12 @@ Prowler supports a wide range of providers organized by category:
|
||||
| Provider | Support | Audit Scope/Entities | Interface |
|
||||
| ----------------------------------------------------------------------------------------- | -------- | ---------------------------- | ------------ |
|
||||
| [GitHub](/user-guide/providers/github/getting-started-github) | Official | Organizations / Repositories | UI, API, CLI |
|
||||
| [Google Workspace](/user-guide/providers/googleworkspace/getting-started-googleworkspace) | Official | Domains | CLI |
|
||||
| [Google Workspace](/user-guide/providers/googleworkspace/getting-started-googleworkspace) | Official | Domains | UI, API, CLI |
|
||||
| [LLM](/user-guide/providers/llm/getting-started-llm) | Official | Models | CLI |
|
||||
| [M365](/user-guide/providers/microsoft365/getting-started-m365) | Official | Tenants | UI, API, CLI |
|
||||
| [MongoDB Atlas](/user-guide/providers/mongodbatlas/getting-started-mongodbatlas) | Official | Organizations | UI, API, CLI |
|
||||
| [Vercel](/user-guide/providers/vercel/getting-started-vercel) | Official | Teams / Projects | CLI |
|
||||
| [Okta](/user-guide/providers/okta/getting-started-okta) | Official | Organizations | CLI |
|
||||
| [Vercel](/user-guide/providers/vercel/getting-started-vercel) | Official | Teams / Projects | UI, API, CLI |
|
||||
|
||||
### Kubernetes
|
||||
|
||||
|
||||
@@ -158,6 +158,15 @@ The following list includes all the Vercel checks with configurable variables th
|
||||
| `team_member_role_least_privilege` | `max_owners` | Integer |
|
||||
| `team_no_stale_invitations` | `stale_invitation_threshold_days` | Integer |
|
||||
|
||||
## Okta
|
||||
|
||||
### Configurable Checks
|
||||
The following list includes all the Okta checks with configurable variables that can be changed in the configuration YAML file:
|
||||
|
||||
| Check Name | Value | Type |
|
||||
|---------------------------------------------------------------|------------------------------------|---------|
|
||||
| `signon_global_session_idle_timeout_15min` | `okta_max_session_idle_minutes` | Integer |
|
||||
|
||||
## Config YAML File Structure
|
||||
|
||||
<Note>
|
||||
|
||||
@@ -18,9 +18,11 @@ prowler <provider> --scan-unused-services
|
||||
|
||||
#### ACM (AWS Certificate Manager)
|
||||
|
||||
Certificates stored in ACM without active usage in AWS resources are excluded. By default, Prowler only scans actively used certificates. Unused certificates will not be checked if they are expired, if their expiring date is near or if they are good.
|
||||
Certificates stored in ACM without active usage in AWS resources are excluded. By default, Prowler only scans actively used certificates. Unused certificates are not evaluated for expiration, transparency logging, or weak key algorithms.
|
||||
|
||||
- `acm_certificates_expiration_check`
|
||||
- `acm_certificates_transparency_logs_enabled`
|
||||
- `acm_certificates_with_secure_key_algorithms`
|
||||
|
||||
#### Athena
|
||||
|
||||
@@ -28,6 +30,13 @@ Upon AWS account creation, Athena provisions a default primary workgroup for the
|
||||
|
||||
- `athena_workgroup_encryption`
|
||||
- `athena_workgroup_enforce_configuration`
|
||||
- `athena_workgroup_logging_enabled`
|
||||
|
||||
#### Amazon Bedrock
|
||||
|
||||
Generative AI workloads benefit from private VPC endpoint connectivity to keep prompt and model traffic off the public internet. Prowler only evaluates this configuration for VPCs in use (with active ENIs).
|
||||
|
||||
- `bedrock_vpc_endpoints_configured`
|
||||
|
||||
#### AWS CloudTrail
|
||||
|
||||
@@ -38,15 +47,23 @@ AWS CloudTrail should have at least one trail with a data event to record all S3
|
||||
|
||||
#### AWS Elastic Compute Cloud (EC2)
|
||||
|
||||
If Amazon Elastic Block Store (EBS) default encyption is not enabled, sensitive data at rest will remain unprotected in EC2. However, Prowler will only generate a finding if EBS volumes exist where default encryption could be enforced.
|
||||
If Amazon Elastic Block Store (EBS) default encryption is not enabled, sensitive data at rest remains unprotected in EC2. Prowler only generates a finding if EBS volumes exist where default encryption could be enforced.
|
||||
|
||||
- `ec2_ebs_default_encryption`
|
||||
|
||||
**EBS Snapshot Public Access**: Public EBS snapshots can leak data. Prowler only evaluates the account-level block setting if EBS snapshots exist in the account.
|
||||
|
||||
- `ec2_ebs_snapshot_account_block_public_access`
|
||||
|
||||
**EC2 Instance Metadata Service (IMDS)**: Enforcing IMDSv2 at the account level mitigates SSRF-based credential theft. Prowler only evaluates the account-level setting if EC2 instances exist in the account.
|
||||
|
||||
- `ec2_instance_account_imdsv2_enabled`
|
||||
|
||||
**Security Groups**: Misconfigured security groups increase the attack surface.
|
||||
|
||||
Prowler scans only attached security groups to report vulnerabilities in actively used configurations. Applies to:
|
||||
|
||||
- 15 security group-related checks, including open ports and ingress/egress traffic rules.
|
||||
- 20 security group-related checks, including open ports and ingress/egress traffic rules.
|
||||
|
||||
- `ec2_securitygroup_allow_ingress_from_internet_to_port_X`
|
||||
- `ec2_securitygroup_default_restrict_traffic`
|
||||
@@ -56,6 +73,18 @@ Prowler scans only attached security groups to report vulnerabilities in activel
|
||||
|
||||
- `ec2_networkacl_allow_ingress_X_port`
|
||||
|
||||
#### AWS Identity and Access Management (IAM)
|
||||
|
||||
Customer-managed IAM policies that are not attached to any user, group, or role grant no effective permissions until a principal is bound to them. Prowler treats such policies as dormant by default and skips the content-evaluation checks below when `--scan-unused-services` is not set. Enable the flag to surface findings on unattached policies as well.
|
||||
|
||||
- `iam_policy_allows_privilege_escalation`
|
||||
- `iam_policy_no_full_access_to_cloudtrail`
|
||||
- `iam_policy_no_full_access_to_kms`
|
||||
- `iam_policy_no_wildcard_marketplace_subscribe`
|
||||
- `iam_no_custom_policy_permissive_role_assumption`
|
||||
|
||||
The dedicated `iam_customer_unattached_policy_no_administrative_privileges` check still inspects unattached policies regardless of the flag, since its purpose is to highlight dormant administrator privileges.
|
||||
|
||||
#### AWS Glue
|
||||
|
||||
AWS Glue best practices recommend encrypting metadata and connection passwords in Data Catalogs.
|
||||
@@ -71,6 +100,12 @@ Amazon Inspector is a vulnerability discovery service that automates continuous
|
||||
|
||||
- `inspector2_is_enabled`
|
||||
|
||||
#### AWS Key Management Service (KMS)
|
||||
|
||||
Customer managed Customer Master Keys (CMKs) in the `Disabled` state cannot be used for cryptographic operations, so Prowler skips the unintentional-deletion check on them by default. Enable the flag to evaluate disabled CMKs as well.
|
||||
|
||||
- `kms_cmk_not_deleted_unintentionally`
|
||||
|
||||
#### Amazon Macie
|
||||
|
||||
Amazon Macie leverages machine learning to automatically discover, classify, and protect sensitive data in S3 buckets. Prowler only generates findings if Macie is disabled and there are S3 buckets in the AWS account.
|
||||
@@ -83,6 +118,15 @@ A network firewall is essential for monitoring and controlling traffic within a
|
||||
|
||||
- `networkfirewall_in_all_vpc`
|
||||
|
||||
#### Amazon Relational Database Service (RDS)
|
||||
|
||||
RDS event subscriptions notify operators of critical database events. Prowler only evaluates these subscription checks when RDS clusters or instances exist in the account.
|
||||
|
||||
- `rds_cluster_critical_event_subscription`
|
||||
- `rds_instance_critical_event_subscription`
|
||||
- `rds_instance_event_subscription_parameter_groups`
|
||||
- `rds_instance_event_subscription_security_groups`
|
||||
|
||||
#### Amazon S3
|
||||
|
||||
To prevent unintended data exposure:
|
||||
@@ -99,6 +143,10 @@ VPC settings directly impact network security and availability.
|
||||
|
||||
- `vpc_flow_logs_enabled`
|
||||
|
||||
- VPC Endpoint for EC2: Routes EC2 API calls through a private VPC endpoint to keep traffic off the public internet. Prowler only evaluates this configuration for VPCs in use, i.e., those with active ENIs.
|
||||
|
||||
- `vpc_endpoint_for_ec2_enabled`
|
||||
|
||||
- VPC Subnet Public IP Restrictions: Prevent unintended exposure of resources to the internet. Prowler only checks this configuration for VPCs in use, i.e., those with active ENIs.
|
||||
|
||||
- `vpc_subnet_no_public_ip_by_default`
|
||||
|
||||
@@ -149,14 +149,6 @@ Prowler Cloud and App expose two formats:
|
||||
* **CSV report:** Every requirement, every check, and every finding for the selected scan and filters. Available for all supported frameworks.
|
||||
* **PDF report:** Curated executive-style report. Currently supported for Prowler ThreatScore, ENS RD2022, NIS2, and CSA CCM. Additional PDF reports are added in subsequent Prowler releases.
|
||||
|
||||
<Note>
|
||||
**PDF detail section is capped at the first 100 failed findings per check.** The PDF is intended as an executive/auditor document, not a raw data dump: when a check produces more than 100 failed findings the report renders the first 100 and shows a banner pointing the reader to the CSV or JSON export for the complete list. The CSV and the ZIP scan output are never truncated.
|
||||
|
||||
The cap is configurable per deployment via the `DJANGO_PDF_MAX_FINDINGS_PER_CHECK` environment variable on the Prowler API workers; set it to `0` to disable truncation entirely. The default value of `100` keeps the PDF readable and bounded in size on enterprise-scale scans (hundreds of thousands of findings) without affecting smaller scans, where the cap is rarely reached.
|
||||
|
||||
Only **failed** findings are rendered in the detail section. PASS findings for the same check are excluded at query time. The PDF surfaces what needs attention, and the CSV/JSON exports surface everything for forensic review.
|
||||
</Note>
|
||||
|
||||
#### Downloading From the Detail Page
|
||||
|
||||
Inside any framework detail page, the **CSV** and **PDF** buttons in the header trigger the same downloads as the overview dropdown. The PDF button only appears for frameworks that support it.
|
||||
|
||||
@@ -22,7 +22,7 @@ Install promptfoo using one of the following methods:
|
||||
|
||||
**Using npm:**
|
||||
```bash
|
||||
npm install --global promptfoo@0.121.11
|
||||
```
|
||||
|
||||
**Using Homebrew (macOS):**
|
||||
|
||||
@@ -0,0 +1,186 @@
|
||||
---
|
||||
title: 'Okta Authentication in Prowler'
|
||||
---
|
||||
|
||||
import { VersionBadge } from "/snippets/version-badge.mdx"
|
||||
|
||||
<VersionBadge version="5.27.0" />
|
||||
|
||||
Prowler authenticates to Okta as a **service application** using **OAuth 2.0 with a private-key JWT** (Client Credentials grant). The integration is read-only by scope and follows DISA STIG guidance for least-privilege access.
|
||||
|
||||
## Common Setup
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- An Okta organization. The UI examples below use **Identity Engine** terminology such as **Global Session Policy**; Classic Engine exposes equivalent sign-on policy concepts under older naming.
|
||||
- A **Super Administrator** account on that organization for the one-time service-app setup.
|
||||
- An **API Services** app integration created in the Okta Admin Console.
|
||||
|
||||
### Authentication Method Overview
|
||||
|
||||
| Method | Status | Use Case |
|
||||
|---|---|---|
|
||||
| **OAuth 2.0 (private-key JWT)** | Supported | Production scans, CI/CD, Prowler App. |
|
||||
|
||||
The private-key JWT flow is the only supported authentication method in the initial release. The service application proves possession of a private key on every token request; Okta returns a short-lived access token, refreshed automatically by the SDK.
|
||||
|
||||
<Note>
|
||||
If a different authentication method is needed (SSWS API token, OAuth with user delegation, etc.), please open a [feature request](https://github.com/prowler-cloud/prowler/issues/new?template=feature-request.yml) describing the use case.
|
||||
</Note>
|
||||
|
||||
### Required OAuth Scopes
|
||||
|
||||
For the initial check (`signon_global_session_idle_timeout_15min`) only one scope is required:
|
||||
|
||||
- `okta.policies.read`
|
||||
|
||||
Additional scopes will be needed as more services and checks are added; these are the ones currently required:
|
||||
|
||||
| Scope | Used by |
|
||||
|---|---|
|
||||
| `okta.policies.read` | Sign-on / password / authentication policies |
|
||||
|
||||
### Required Admin Role
|
||||
|
||||
The service application must be assigned the built-in **Read-Only Administrator** role.
|
||||
|
||||
Okta's Management API enforces a two-layer authorization model: an OAuth **scope** decides which API endpoints the token can call, and an **admin role** decides whether the call returns data. With only a scope granted, the token mint succeeds but every read returns `403 Forbidden`. The Read-Only Administrator role is the minimum that lets the granted `okta.*.read` scopes actually return configuration data to Prowler's checks — without it, the credential probe at provider startup fails and the scan never gets to evaluate any check.
|
||||
|
||||
Read-Only Administrator is intentionally the narrowest role that satisfies this requirement and aligns with the least-privilege guidance in DISA STIG.
|
||||
|
||||
## Step-by-Step Setup
|
||||
|
||||
### 1. Go to the admin console
|
||||
|
||||

|
||||
|
||||
### 2. [Optional] - Disable the privilege-escalation bypass (org-wide, one-time)
|
||||
|
||||
In the Okta Admin Console, go to **Settings → Account → Public client app admins** and ensure it is **off**. When enabled, every API Services app can be auto-assigned the Super Administrator role after scopes are granted, which would invalidate the read-only premise of this integration.
|
||||
|
||||

|
||||
|
||||
### 3. Create the API Services app
|
||||
|
||||
1. Go to **Applications → Applications**.
|
||||
|
||||

|
||||
|
||||
2. **Create App Integration**
|
||||
|
||||

|
||||
|
||||
3. Sign-in method: **API Services**. Click **Next**.
|
||||
4. Name the app (for example, `Prowler Scanner`) and click **Save**.
|
||||
5. Copy the displayed **Client ID** — you'll use it as `OKTA_CLIENT_ID`.
|
||||
|
||||

|
||||
|
||||
### 4. Switch to private-key authentication and generate a keypair
|
||||
|
||||
On the new app's **General** tab, scroll to **Client Credentials**:
|
||||
|
||||
1. Click **Edit**.
|
||||
2. Set **Client authentication** to **Public key / Private key**.
|
||||
3. Under **Public Keys**, click **Add key**.
|
||||
4. In the modal, click **Generate new key**. Okta creates a JWK pair.
|
||||
5. Click the **PEM** tab to switch the displayed format (or keep JWK — Prowler accepts both).
|
||||
6. Copy the entire `-----BEGIN PRIVATE KEY-----` block (or the JWK JSON).
|
||||
7. Click **Done**, then **Save**.
|
||||
|
||||
<Warning>
|
||||
Okta displays the private key **only once**. If you close the modal without copying, you must generate a new key.
|
||||
</Warning>
|
||||
|
||||

|
||||
|
||||
### 5. Grant the required OAuth scopes
|
||||
|
||||
On the app, open the **Okta API Scopes** tab and click **Grant** on every scope Prowler needs. For the initial release, granting only `okta.policies.read` is sufficient.
|
||||
|
||||

|
||||
|
||||
### 6. Assign the Read-Only Administrator role
|
||||
|
||||
On the app, open the **Admin roles** tab and click **Edit assignments → Add assignment**:
|
||||
|
||||
- **Role:** Read-Only Administrator
|
||||
- **Resources:** All resources
|
||||
|
||||
Save the changes.
|
||||
|
||||

|
||||
|
||||
### 7. [Optional] Verify DPoP setting
|
||||
|
||||
Prowler sends DPoP (Demonstrating Proof of Possession) proofs on every token request. The integration works whether the **Require Demonstrating Proof of Possession (DPoP) header in token requests** setting on the service app is on or off — but enabling it is the more secure default.
|
||||
|
||||
## Prowler CLI Authentication
|
||||
|
||||
### Using Environment Variables (Required for Secrets)
|
||||
|
||||
Private key material **must** be supplied via environment variables — Prowler does not accept secrets through CLI flags.
|
||||
|
||||
```bash
|
||||
export OKTA_ORG_DOMAIN="YOUR-ORG.okta.com"
|
||||
export OKTA_CLIENT_ID="0oa1234567890abcdef"
|
||||
|
||||
# Either of the two — content takes precedence over file when both are set.
|
||||
export OKTA_PRIVATE_KEY_FILE="/secure/path/to/prowler-okta.pem"
|
||||
# or
|
||||
export OKTA_PRIVATE_KEY="$(cat /secure/path/to/prowler-okta.pem)"
|
||||
|
||||
# Optional — defaults to "okta.policies.read"
|
||||
export OKTA_SCOPES="okta.policies.read"
|
||||
|
||||
poetry run python prowler-cli.py okta
|
||||
```
|
||||
|
||||
### Non-Secret CLI Flags
|
||||
|
||||
Non-secret values are also available as CLI flags for ergonomic overrides:
|
||||
|
||||
| Flag | Equivalent env var |
|
||||
|---|---|
|
||||
| `--okta-org-domain` | `OKTA_ORG_DOMAIN` |
|
||||
| `--okta-client-id` | `OKTA_CLIENT_ID` |
|
||||
| `--okta-scopes` | `OKTA_SCOPES` |
|
||||
|
||||
Run a single check directly:
|
||||
|
||||
```bash
|
||||
poetry run python prowler-cli.py okta --check signon_global_session_idle_timeout_15min
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### `OktaInvalidOrgDomainError`
|
||||
|
||||
The org domain must be `<org>.okta.com` (or `.oktapreview.com` / `.okta-emea.com` / `.okta-gov.com` / `.okta.mil` / `.okta-miltest.com` / `.trex-govcloud.com`). Pass the bare hostname only — no `https://` scheme, no path, no trailing slash. Custom (vanity) domains are not currently accepted.
|
||||
|
||||
### `OktaPrivateKeyFileError`
|
||||
|
||||
The file at `OKTA_PRIVATE_KEY_FILE` is missing, unreadable, or empty. Confirm the path and that the file contains a non-empty PEM block or JWK JSON document.
|
||||
|
||||
### `OktaInvalidCredentialsError` at provider init
|
||||
|
||||
Prowler validates credentials at startup by listing one sign-on policy. This error indicates the credential material itself was rejected:
|
||||
|
||||
- **`invalid_client`** — the public key registered in Okta does not match the private key on disk. Generate a fresh keypair and try again.
|
||||
|
||||
### `OktaInsufficientPermissionsError` at provider init
|
||||
|
||||
Raised when the credential probe succeeds at the OAuth layer but the request is rejected because the service app lacks the required scope or admin role:
|
||||
|
||||
- **`invalid_scope`** — the `okta.policies.read` scope is not granted on the service app. Grant it from **Okta API Scopes**.
|
||||
- **`Forbidden` / `not authorized`** — the **Read-Only Administrator** role is not assigned to the service app. Assign it from **Admin roles**.
|
||||
|
||||
### `invalid_dpop_proof`
|
||||
|
||||
The org or the service app requires DPoP. The provider always sends DPoP proofs, so this error indicates the SDK could not build a valid proof — typically because the private key on disk does not match the public key uploaded to Okta. Regenerate the keypair.
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [Implement OAuth 2.0 for an Okta service app](https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/)
|
||||
- [Okta Policy API reference](https://developer.okta.com/docs/api/openapi/okta-management/management/tag/Policy/)
|
||||
- [DISA STIG for Okta (V-273186)](https://stigviewer.com/stigs/okta/)
|
||||
@@ -0,0 +1,144 @@
|
||||
---
|
||||
title: 'Getting Started With Okta on Prowler'
|
||||
---
|
||||
|
||||
import { VersionBadge } from "/snippets/version-badge.mdx"
|
||||
|
||||
Prowler for Okta scans an Okta organization for identity and session-management misconfigurations. The provider authenticates as a service application using **OAuth 2.0 with a private-key JWT** (Client Credentials grant) — no end-user login, read-only by scope.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Set up authentication for Okta with the [Okta Authentication](/user-guide/providers/okta/authentication) guide before starting:
|
||||
|
||||
- An Okta organization. The UI examples below use **Identity Engine** terminology such as **Global Session Policy**; Classic Engine exposes the equivalent sign-on policy concepts under older names.
|
||||
- A **Super Administrator** account on that organization for the one-time service-app setup.
|
||||
- An **API Services** app integration in the Okta Admin Console with the `okta.policies.read` scope granted and the **Read-Only Administrator** role assigned.
|
||||
- Python 3.10+ and Prowler 5.27.0 or later installed locally.
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Prowler Cloud" icon="cloud" href="#prowler-cloud">
|
||||
Onboard Okta using Prowler Cloud
|
||||
</Card>
|
||||
<Card title="Prowler CLI" icon="terminal" href="#prowler-cli">
|
||||
Onboard Okta using Prowler CLI
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Prowler Cloud
|
||||
|
||||
<Note>
|
||||
Prowler Cloud onboarding for Okta is coming soon. Track the [Prowler GitHub repository](https://github.com/prowler-cloud/prowler) for release updates. Use the [Prowler CLI](#prowler-cli) workflow below in the meantime.
|
||||
</Note>
|
||||
|
||||
---
|
||||
|
||||
## Prowler CLI
|
||||
|
||||
<VersionBadge version="5.27.0" />
|
||||
|
||||
### Step 1: Set Up Authentication
|
||||
|
||||
Follow the [Okta Authentication](/user-guide/providers/okta/authentication) guide to create the service application, generate a keypair, grant scopes, and assign the Read-Only Administrator role. Then export the credentials:
|
||||
|
||||
```bash
|
||||
export OKTA_ORG_DOMAIN="acme.okta.com"
|
||||
export OKTA_CLIENT_ID="0oa1234567890abcdef"
|
||||
export OKTA_PRIVATE_KEY_FILE="/secure/path/to/prowler-okta.pem"
|
||||
# Optional — defaults to "okta.policies.read"
|
||||
export OKTA_SCOPES="okta.policies.read"
|
||||
```
|
||||
|
||||
The private key file may contain either a PEM-encoded RSA key or a JWK JSON document.
|
||||
|
||||
#### Supplying the Private Key as Content
|
||||
|
||||
For automated environments where writing the key to disk is not desirable (CI runners, container secrets, etc.), the private key may be passed directly as a string:
|
||||
|
||||
```bash
|
||||
export OKTA_ORG_DOMAIN="acme.okta.com"
|
||||
export OKTA_CLIENT_ID="0oa1234567890abcdef"
|
||||
export OKTA_PRIVATE_KEY="$(cat /secure/path/to/prowler-okta.pem)"
|
||||
```
|
||||
|
||||
`OKTA_PRIVATE_KEY` takes precedence over `OKTA_PRIVATE_KEY_FILE` when both are set. The private key is intentionally not exposed as a CLI flag — secrets must be supplied via environment variables only.
|
||||
|
||||
### Step 2: Run the First Scan
|
||||
|
||||
Run a baseline scan after credentials are configured:
|
||||
|
||||
```bash
|
||||
prowler okta
|
||||
```
|
||||
|
||||
Or run a specific check directly:
|
||||
|
||||
```bash
|
||||
prowler okta --check signon_global_session_idle_timeout_15min
|
||||
```
|
||||
|
||||
Prowler prints a summary table; full findings are written to the configured output formats.
|
||||
|
||||
### Step 3: Use a Custom Configuration (Optional)
|
||||
|
||||
Prowler uses a configuration file to customize check thresholds. The Okta configuration currently includes:
|
||||
|
||||
```yaml
|
||||
okta:
|
||||
# okta.signon_global_session_idle_timeout_15min
|
||||
# Defaults to 15 minutes per DISA STIG V-273186.
|
||||
okta_max_session_idle_minutes: 15
|
||||
```
|
||||
|
||||
To use a custom configuration:
|
||||
|
||||
```bash
|
||||
prowler okta --config-file /path/to/config.yaml
|
||||
```
|
||||
|
||||
## Supported Services
|
||||
|
||||
Prowler for Okta includes security checks across the following services:
|
||||
|
||||
| Service | Description |
|
||||
| ----------- | ----------------------------------------------------------------------------------- |
|
||||
| **Sign-On** | Global session policy controls (idle timeout, lifetime, rule priority and ordering) |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### STIG Rule Ordering
|
||||
|
||||
The initial check is mapped to DISA STIG `V-273186` / `OKTA-APP-000020`. Prowler implements the STIG procedure as written: the **Default Policy** must have a **Priority 1** rule that is **not** `Default Rule`, and that rule must set **Maximum Okta global session idle time** to 15 minutes or less.
|
||||
|
||||
This is stricter than simply finding the same timeout value somewhere else in the policy set. A compliant custom rule in another policy, or a compliant timeout on the built-in `Default Rule`, does not satisfy this STIG procedure.
|
||||
|
||||
### Default Scopes
|
||||
|
||||
Prowler requests a fixed set of OAuth scopes on every token exchange. The default is a single scope that covers the bundled initial check:
|
||||
|
||||
- `okta.policies.read`
|
||||
|
||||
The service app must have that scope granted in the **Okta API Scopes** tab. When the granted set is narrower than the requested set, the token request fails with an `invalid_scope` error and the scan stops at provider initialization.
|
||||
|
||||
When additional checks are enabled — or when running against a service app that exposes a different scope set — override the default with `OKTA_SCOPES` (comma-separated string for the env var) or `--okta-scopes` (space-separated list for the CLI):
|
||||
|
||||
```bash
|
||||
# Environment variable — comma-separated
|
||||
export OKTA_SCOPES="okta.policies.read,okta.apps.read,okta.users.read"
|
||||
|
||||
# CLI flag — space-separated
|
||||
prowler okta --okta-scopes okta.policies.read okta.apps.read okta.users.read
|
||||
```
|
||||
|
||||
For the full catalog of OAuth scopes exposed by the Okta Management API, refer to the [Okta OAuth 2.0 scopes documentation](https://developer.okta.com/docs/api/oauth2/).
|
||||
|
||||
<Note>
|
||||
As new services and checks land in the Okta provider, the default scope list grows alongside them. Re-check the granted scopes on the service app after each Prowler upgrade and grant any newly required `okta.*.read` scopes in the Admin Console.
|
||||
</Note>
|
||||
|
||||
### Common Errors
|
||||
|
||||
- **`OktaInvalidOrgDomainError`** — the org domain must be `<org>.okta.com` (or `.oktapreview.com` / `.okta-emea.com` / `.okta-gov.com` / `.okta.mil` / `.okta-miltest.com` / `.trex-govcloud.com`). Pass the bare hostname only — no `https://` scheme, no path, no trailing slash.
|
||||
- **`OktaPrivateKeyFileError`** — confirm the file is readable and contains a non-empty PEM or JWK body.
|
||||
- **`OktaInsufficientPermissionsError`** — the credential probe reached Okta but the service app cannot perform the request. The error string carries `invalid_scope`, `Forbidden`, `not authorized`, or `permission`. Fix by granting the missing `okta.*.read` scope from **Okta API Scopes** and confirming the **Read-Only Administrator** role is assigned to the service app.
|
||||
- **`OktaInvalidCredentialsError`** — the credential probe reached Okta but Okta rejected the JWT. Typically the private key on disk does not match the public JWK uploaded to the service app, or the JWT signing parameters are wrong. Regenerate the keypair and re-upload the public JWK.
|
||||
- **Token requests failing for an unknown scope** — the app was granted a narrower scope set than `OKTA_SCOPES` requests. Either narrow `OKTA_SCOPES` or grant the missing scopes in the Admin Console.
|
||||
|
After Width: | Height: | Size: 159 KiB |
|
After Width: | Height: | Size: 134 KiB |
|
After Width: | Height: | Size: 173 KiB |
|
After Width: | Height: | Size: 127 KiB |
|
After Width: | Height: | Size: 83 KiB |
|
After Width: | Height: | Size: 78 KiB |
|
After Width: | Height: | Size: 216 KiB |
|
After Width: | Height: | Size: 56 KiB |
@@ -4,6 +4,10 @@ All notable changes to the **Prowler MCP Server** are documented in this file.
|
||||
|
||||
## [0.7.0] (Prowler UNRELEASED)
|
||||
|
||||
### 🚀 Added
|
||||
|
||||
- MCP Server tools for Prowler Finding Groups Management [(#11140)](https://github.com/prowler-cloud/prowler/pull/11140)
|
||||
|
||||
### 🔐 Security
|
||||
|
||||
- `cryptography` from 46.0.1 to 47.0.0 (transitive) for CVE-2026-39892 and CVE-2026-26007 / CVE-2026-34073 [(#10978)](https://github.com/prowler-cloud/prowler/pull/10978)
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
Full access to Prowler Cloud platform and self-managed Prowler App for:
|
||||
- **Findings Analysis**: Query, filter, and analyze security findings across all your cloud environments
|
||||
- **Finding Groups Analysis**: Triage findings grouped by check ID and drill down into affected resources
|
||||
- **Provider Management**: Create, configure, and manage your configured Prowler providers (AWS, Azure, GCP, etc.)
|
||||
- **Scan Orchestration**: Trigger on-demand scans and schedule recurring security assessments
|
||||
- **Resource Inventory**: Search and view detailed information about your audited resources
|
||||
@@ -56,13 +57,21 @@ Prowler MCP Server can be used in three ways:
|
||||
- Managed and maintained by Prowler team
|
||||
- Always up-to-date
|
||||
|
||||
Install a reviewed version of `mcp-remote` in a dedicated local workspace first. Avoid running `npx mcp-remote` directly because it can download and execute a new package version on each run.
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.local/share/prowler-mcp-bridge
|
||||
cd ~/.local/share/prowler-mcp-bridge
|
||||
npm init -y
|
||||
npm install --save-exact mcp-remote@0.1.38
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"prowler": {
|
||||
"command": "npx",
|
||||
"command": "/absolute/path/to/.local/share/prowler-mcp-bridge/node_modules/.bin/mcp-remote",
|
||||
"args": [
|
||||
"mcp-remote",
|
||||
"https://mcp.prowler.com/mcp",
|
||||
"--header",
|
||||
"Authorization: Bearer pk_YOUR_API_KEY_HERE"
|
||||
|
||||
@@ -0,0 +1,300 @@
|
||||
"""Pydantic models for Prowler Finding Groups responses."""
|
||||
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from prowler_mcp_server.prowler_app.models.base import MinimalSerializerMixin
|
||||
|
||||
|
||||
FindingStatus = Literal["FAIL", "PASS", "MANUAL"]
|
||||
FindingSeverity = Literal["critical", "high", "medium", "low", "informational"]
|
||||
FindingDelta = Literal["new", "changed"]
|
||||
|
||||
|
||||
def _attributes(data: dict) -> dict:
|
||||
return data.get("attributes", {})
|
||||
|
||||
|
||||
def _counter(attributes: dict, key: str) -> int:
|
||||
return attributes.get(key) or 0
|
||||
|
||||
|
||||
def _simplified_group_kwargs(data: dict) -> dict:
    """Build the constructor kwargs shared by ``SimplifiedFindingGroup`` and
    its subclasses from a raw JSON:API finding-group resource.

    Counter fields are routed through ``_counter`` so ``None`` becomes 0;
    ``check_id`` falls back to the resource ``id`` when the attribute is
    missing; timestamp fields pass through unchanged (possibly None).
    """
    attributes = _attributes(data)

    # Counters the simplified view exposes; all normalized to >= 0.
    counter_names = (
        "resources_fail",
        "resources_total",
        "pass_count",
        "fail_count",
        "manual_count",
        "muted_count",
        "new_count",
        "changed_count",
    )

    kwargs = {
        "check_id": attributes.get("check_id", data.get("id", "")),
        "check_title": attributes.get("check_title"),
        "severity": attributes.get("severity", "informational"),
        "status": attributes.get("status", "MANUAL"),
        "muted": attributes.get("muted", False),
        "impacted_providers": attributes.get("impacted_providers") or [],
        "first_seen_at": attributes.get("first_seen_at"),
        "last_seen_at": attributes.get("last_seen_at"),
        "failing_since": attributes.get("failing_since"),
    }
    kwargs.update({name: _counter(attributes, name) for name in counter_names})
    return kwargs
|
||||
|
||||
|
||||
class SimplifiedFindingGroup(MinimalSerializerMixin):
    """Finding group summary optimized for browsing many checks.

    One instance aggregates every finding that shares a check ID. Counter
    fields are always integers >= 0 (``from_api_response`` normalizes
    ``None`` counters to 0 via ``_simplified_group_kwargs``); timestamp
    fields carry the raw API strings, or ``None`` when absent.
    """

    check_id: str = Field(description="Public check ID that identifies this group")
    check_title: str | None = Field(
        default=None, description="Human-readable check title"
    )
    severity: FindingSeverity = Field(description="Highest severity in the group")
    status: FindingStatus = Field(description="Aggregated finding group status")
    muted: bool = Field(
        description="Whether all findings in this group are muted or accepted"
    )
    impacted_providers: list[str] = Field(
        default_factory=list,
        description="Provider types impacted by this finding group",
    )
    resources_fail: int = Field(
        description="Number of non-muted failing resources in this group", ge=0
    )
    resources_total: int = Field(
        description="Total number of resources in this group", ge=0
    )
    pass_count: int = Field(
        description="Number of non-muted PASS findings in this group", ge=0
    )
    fail_count: int = Field(
        description="Number of non-muted FAIL findings in this group", ge=0
    )
    manual_count: int = Field(
        description="Number of non-muted MANUAL findings in this group", ge=0
    )
    muted_count: int = Field(description="Total muted findings in this group", ge=0)
    new_count: int = Field(description="Number of new non-muted findings", ge=0)
    changed_count: int = Field(
        description="Number of changed non-muted findings", ge=0
    )
    first_seen_at: str | None = Field(
        default=None, description="First time this group was detected"
    )
    last_seen_at: str | None = Field(
        default=None, description="Last time this group was detected"
    )
    failing_since: str | None = Field(
        default=None, description="First time this group started failing"
    )

    @classmethod
    def from_api_response(cls, data: dict) -> "SimplifiedFindingGroup":
        """Transform a JSON:API finding-group resource into this simplified
        model, applying the defaults and counter normalization documented on
        the class."""
        return cls(**_simplified_group_kwargs(data))
|
||||
|
||||
|
||||
class DetailedFindingGroup(SimplifiedFindingGroup):
    """Finding group with complete counters and descriptive context.

    Extends the simplified summary with the check description plus the full
    matrix of muted/new/changed counters broken down by finding status.
    """

    check_description: str | None = Field(
        default=None, description="Description of the check behind this group"
    )
    pass_muted_count: int = Field(description="Muted PASS findings", ge=0)
    fail_muted_count: int = Field(description="Muted FAIL findings", ge=0)
    manual_muted_count: int = Field(description="Muted MANUAL findings", ge=0)
    new_fail_count: int = Field(description="New non-muted FAIL findings", ge=0)
    new_fail_muted_count: int = Field(description="New muted FAIL findings", ge=0)
    new_pass_count: int = Field(description="New non-muted PASS findings", ge=0)
    new_pass_muted_count: int = Field(description="New muted PASS findings", ge=0)
    new_manual_count: int = Field(description="New non-muted MANUAL findings", ge=0)
    new_manual_muted_count: int = Field(
        description="New muted MANUAL findings", ge=0
    )
    changed_fail_count: int = Field(
        description="Changed non-muted FAIL findings", ge=0
    )
    changed_fail_muted_count: int = Field(
        description="Changed muted FAIL findings", ge=0
    )
    changed_pass_count: int = Field(
        description="Changed non-muted PASS findings", ge=0
    )
    changed_pass_muted_count: int = Field(
        description="Changed muted PASS findings", ge=0
    )
    changed_manual_count: int = Field(
        description="Changed non-muted MANUAL findings", ge=0
    )
    changed_manual_muted_count: int = Field(
        description="Changed muted MANUAL findings", ge=0
    )

    @classmethod
    def from_api_response(cls, data: dict) -> "DetailedFindingGroup":
        """Transform JSON:API finding group response to detailed format.

        The detailed counters are read uniformly through ``_counter`` so a
        missing or ``None`` value becomes 0, matching the base-class behavior.
        """
        attributes = _attributes(data)

        # Every detail-only counter field; names match the API attributes 1:1.
        detail_counters = (
            "pass_muted_count",
            "fail_muted_count",
            "manual_muted_count",
            "new_fail_count",
            "new_fail_muted_count",
            "new_pass_count",
            "new_pass_muted_count",
            "new_manual_count",
            "new_manual_muted_count",
            "changed_fail_count",
            "changed_fail_muted_count",
            "changed_pass_count",
            "changed_pass_muted_count",
            "changed_manual_count",
            "changed_manual_muted_count",
        )

        return cls(
            **_simplified_group_kwargs(data),
            check_description=attributes.get("check_description"),
            **{name: _counter(attributes, name) for name in detail_counters},
        )
|
||||
|
||||
|
||||
class FindingGroupsListResponse(MinimalSerializerMixin):
    """Paginated response for finding group list queries."""

    groups: list[SimplifiedFindingGroup] = Field(
        description="Finding groups matching the query"
    )
    total_num_groups: int = Field(
        description="Total groups matching the query across all pages", ge=0
    )
    total_num_pages: int = Field(description="Total pages available", ge=0)
    current_page: int = Field(description="Current page number", ge=1)

    @classmethod
    def from_api_response(cls, response: dict) -> "FindingGroupsListResponse":
        """Transform a JSON:API list response into this paginated model.

        Pagination metadata is read from ``meta.pagination``; when absent,
        the count falls back to the number of parsed groups and page numbers
        default to 1.
        """
        pagination = response.get("meta", {}).get("pagination", {})
        items = response.get("data", [])
        parsed_groups = [
            SimplifiedFindingGroup.from_api_response(item) for item in items
        ]

        return cls(
            groups=parsed_groups,
            total_num_groups=pagination.get("count", len(parsed_groups)),
            total_num_pages=pagination.get("pages", 1),
            current_page=pagination.get("page", 1),
        )
|
||||
|
||||
|
||||
class FindingGroupResourceInfo(MinimalSerializerMixin):
|
||||
"""Nested resource information for a finding group row."""
|
||||
|
||||
uid: str = Field(description="Provider-native resource UID")
|
||||
name: str = Field(description="Resource name")
|
||||
service: str = Field(description="Cloud service")
|
||||
region: str = Field(description="Cloud region")
|
||||
type: str = Field(description="Resource type")
|
||||
resource_group: str | None = Field(
|
||||
default=None, description="Provider resource group or equivalent"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, data: dict) -> "FindingGroupResourceInfo":
|
||||
"""Transform nested resource data to simplified format."""
|
||||
return cls(
|
||||
uid=data.get("uid", ""),
|
||||
name=data.get("name", ""),
|
||||
service=data.get("service", ""),
|
||||
region=data.get("region", ""),
|
||||
type=data.get("type", ""),
|
||||
resource_group=data.get("resource_group"),
|
||||
)
|
||||
|
||||
|
||||
class FindingGroupProviderInfo(MinimalSerializerMixin):
|
||||
"""Nested provider information for a finding group resource row."""
|
||||
|
||||
type: str = Field(description="Provider type")
|
||||
uid: str = Field(description="Provider-native account or subscription ID")
|
||||
alias: str | None = Field(default=None, description="Provider alias")
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, data: dict) -> "FindingGroupProviderInfo":
|
||||
"""Transform nested provider data to simplified format."""
|
||||
return cls(
|
||||
type=data.get("type", ""),
|
||||
uid=data.get("uid", ""),
|
||||
alias=data.get("alias"),
|
||||
)
|
||||
|
||||
|
||||
class FindingGroupResource(MinimalSerializerMixin):
|
||||
"""Resource row affected by a finding group."""
|
||||
|
||||
id: str = Field(description="Row identifier for this finding group resource")
|
||||
resource: FindingGroupResourceInfo = Field(description="Affected resource")
|
||||
provider: FindingGroupProviderInfo = Field(description="Affected provider")
|
||||
finding_id: str = Field(
|
||||
description="Finding UUID to use with prowler_app_get_finding_details"
|
||||
)
|
||||
status: FindingStatus = Field(description="Finding status for this resource")
|
||||
severity: FindingSeverity = Field(description="Finding severity")
|
||||
muted: bool = Field(description="Whether the finding is muted")
|
||||
delta: FindingDelta | None = Field(default=None, description="Change status")
|
||||
first_seen_at: str | None = Field(default=None, description="First seen time")
|
||||
last_seen_at: str | None = Field(default=None, description="Last seen time")
|
||||
muted_reason: str | None = Field(default=None, description="Mute reason")
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, data: dict) -> "FindingGroupResource":
|
||||
"""Transform JSON:API finding group resource response."""
|
||||
attributes = _attributes(data)
|
||||
|
||||
return cls(
|
||||
id=data.get("id", ""),
|
||||
resource=FindingGroupResourceInfo.from_api_response(
|
||||
attributes.get("resource") or {}
|
||||
),
|
||||
provider=FindingGroupProviderInfo.from_api_response(
|
||||
attributes.get("provider") or {}
|
||||
),
|
||||
finding_id=str(attributes.get("finding_id", "")),
|
||||
status=attributes.get("status", "MANUAL"),
|
||||
severity=attributes.get("severity", "informational"),
|
||||
muted=attributes.get("muted", False),
|
||||
delta=attributes.get("delta"),
|
||||
first_seen_at=attributes.get("first_seen_at"),
|
||||
last_seen_at=attributes.get("last_seen_at"),
|
||||
muted_reason=attributes.get("muted_reason"),
|
||||
)
|
||||
|
||||
|
||||
class FindingGroupResourcesListResponse(MinimalSerializerMixin):
|
||||
"""Paginated response for finding group resource queries."""
|
||||
|
||||
resources: list[FindingGroupResource] = Field(
|
||||
description="Resources matching the finding group query"
|
||||
)
|
||||
total_num_resources: int = Field(
|
||||
description="Total resources matching the query across all pages", ge=0
|
||||
)
|
||||
total_num_pages: int = Field(description="Total pages available", ge=0)
|
||||
current_page: int = Field(description="Current page number", ge=1)
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, response: dict) -> "FindingGroupResourcesListResponse":
|
||||
"""Transform JSON:API resource list response to simplified format."""
|
||||
pagination = response.get("meta", {}).get("pagination", {})
|
||||
resources = [
|
||||
FindingGroupResource.from_api_response(item)
|
||||
for item in response.get("data", [])
|
||||
]
|
||||
|
||||
return cls(
|
||||
resources=resources,
|
||||
total_num_resources=pagination.get("count", len(resources)),
|
||||
total_num_pages=pagination.get("pages", 1),
|
||||
current_page=pagination.get("page", 1),
|
||||
)
|
||||
@@ -0,0 +1,473 @@
|
||||
"""Finding Groups tools for Prowler App MCP Server.
|
||||
|
||||
This module provides read-only tools for finding group triage and drill-downs.
|
||||
"""
|
||||
|
||||
from typing import Any, Literal
|
||||
from urllib.parse import quote
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from prowler_mcp_server.prowler_app.models.finding_groups import (
|
||||
DetailedFindingGroup,
|
||||
FindingGroupResourcesListResponse,
|
||||
FindingGroupsListResponse,
|
||||
)
|
||||
from prowler_mcp_server.prowler_app.tools.base import BaseTool
|
||||
|
||||
|
||||
StatusFilter = Literal["FAIL", "PASS", "MANUAL"]
|
||||
SeverityFilter = Literal["critical", "high", "medium", "low", "informational"]
|
||||
DeltaFilter = Literal["new", "changed"]
|
||||
|
||||
GROUP_DETAIL_FIELDS = (
|
||||
"check_id,check_title,check_description,severity,status,muted,"
|
||||
"impacted_providers,resources_fail,resources_total,pass_count,fail_count,"
|
||||
"manual_count,pass_muted_count,fail_muted_count,manual_muted_count,"
|
||||
"muted_count,new_count,changed_count,new_fail_count,new_fail_muted_count,"
|
||||
"new_pass_count,new_pass_muted_count,new_manual_count,new_manual_muted_count,"
|
||||
"changed_fail_count,changed_fail_muted_count,changed_pass_count,"
|
||||
"changed_pass_muted_count,changed_manual_count,changed_manual_muted_count,"
|
||||
"first_seen_at,last_seen_at,failing_since"
|
||||
)
|
||||
|
||||
GROUP_LIST_FIELDS = (
|
||||
"check_id,check_title,severity,status,muted,impacted_providers,"
|
||||
"resources_fail,resources_total,pass_count,fail_count,manual_count,"
|
||||
"muted_count,new_count,changed_count,first_seen_at,last_seen_at,failing_since"
|
||||
)
|
||||
|
||||
RESOURCE_FIELDS = (
|
||||
"resource,provider,finding_id,status,severity,muted,delta,"
|
||||
"first_seen_at,last_seen_at,muted_reason"
|
||||
)
|
||||
|
||||
|
||||
class FindingGroupsTools(BaseTool):
|
||||
"""Tools for Finding Groups operations."""
|
||||
|
||||
@staticmethod
|
||||
def _bool_value(value: bool | str) -> bool:
|
||||
"""Normalize bool-like MCP client values."""
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
return value.lower() == "true"
|
||||
|
||||
@staticmethod
|
||||
def _group_endpoint(date_range: tuple[str, str] | None) -> str:
|
||||
return "/finding-groups/latest" if date_range is None else "/finding-groups"
|
||||
|
||||
@staticmethod
|
||||
def _resource_endpoint(check_id: str, date_range: tuple[str, str] | None) -> str:
|
||||
escaped_check_id = quote(check_id, safe="")
|
||||
if date_range is None:
|
||||
return f"/finding-groups/latest/{escaped_check_id}/resources"
|
||||
return f"/finding-groups/{escaped_check_id}/resources"
|
||||
|
||||
def _base_date_params(
|
||||
self, date_from: str | None, date_to: str | None
|
||||
) -> tuple[tuple[str, str] | None, dict[str, Any]]:
|
||||
date_range = self.api_client.normalize_date_range(
|
||||
date_from, date_to, max_days=2
|
||||
)
|
||||
if date_range is None:
|
||||
return None, {}
|
||||
|
||||
return date_range, {
|
||||
"filter[inserted_at__gte]": date_range[0],
|
||||
"filter[inserted_at__lte]": date_range[1],
|
||||
}
|
||||
|
||||
def _apply_common_filters(
|
||||
self,
|
||||
params: dict[str, Any],
|
||||
provider: list[str],
|
||||
provider_type: list[str],
|
||||
provider_uid: list[str],
|
||||
provider_alias: str | None,
|
||||
region: list[str],
|
||||
service: list[str],
|
||||
resource_type: list[str],
|
||||
resource_name: str | None,
|
||||
resource_uid: str | None,
|
||||
resource_group: list[str],
|
||||
category: list[str],
|
||||
check_id: list[str],
|
||||
check_title: str | None,
|
||||
severity: list[SeverityFilter],
|
||||
status: list[StatusFilter],
|
||||
muted: bool | str | None,
|
||||
delta: list[DeltaFilter],
|
||||
) -> None:
|
||||
if provider:
|
||||
params["filter[provider__in]"] = provider
|
||||
if provider_type:
|
||||
params["filter[provider_type__in]"] = provider_type
|
||||
if provider_uid:
|
||||
params["filter[provider_uid__in]"] = provider_uid
|
||||
if provider_alias:
|
||||
params["filter[provider_alias__icontains]"] = provider_alias
|
||||
if region:
|
||||
params["filter[region__in]"] = region
|
||||
if service:
|
||||
params["filter[service__in]"] = service
|
||||
if resource_type:
|
||||
params["filter[resource_type__in]"] = resource_type
|
||||
if resource_name:
|
||||
params["filter[resource_name__icontains]"] = resource_name
|
||||
if resource_uid:
|
||||
params["filter[resource_uid__icontains]"] = resource_uid
|
||||
if resource_group:
|
||||
params["filter[resource_groups__in]"] = resource_group
|
||||
if category:
|
||||
params["filter[category__in]"] = category
|
||||
if check_id:
|
||||
params["filter[check_id__in]"] = check_id
|
||||
if check_title:
|
||||
params["filter[check_title__icontains]"] = check_title
|
||||
if severity:
|
||||
params["filter[severity__in]"] = severity
|
||||
if status:
|
||||
params["filter[status__in]"] = status
|
||||
if muted is not None:
|
||||
params["filter[muted]"] = self._bool_value(muted)
|
||||
if delta:
|
||||
params["filter[delta__in]"] = delta
|
||||
|
||||
async def list_finding_groups(
|
||||
self,
|
||||
provider: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by provider UUIDs. Multiple values allowed. If empty, all visible providers are returned.",
|
||||
),
|
||||
provider_type: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by provider type. Multiple values allowed, such as aws, azure, gcp, kubernetes, github, or m365.",
|
||||
),
|
||||
provider_uid: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by provider-native account, subscription, or project IDs. Multiple values allowed.",
|
||||
),
|
||||
provider_alias: str | None = Field(
|
||||
default=None,
|
||||
description="Filter by provider alias/name using partial matching.",
|
||||
),
|
||||
region: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by cloud regions. Multiple values allowed.",
|
||||
),
|
||||
service: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by cloud services. Multiple values allowed.",
|
||||
),
|
||||
resource_type: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by resource types. Multiple values allowed.",
|
||||
),
|
||||
resource_name: str | None = Field(
|
||||
default=None,
|
||||
description="Filter by resource name using partial matching.",
|
||||
),
|
||||
resource_uid: str | None = Field(
|
||||
default=None,
|
||||
description="Filter by resource UID using partial matching.",
|
||||
),
|
||||
resource_group: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by resource group values. Multiple values allowed.",
|
||||
),
|
||||
category: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by finding categories. Multiple values allowed.",
|
||||
),
|
||||
check_id: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by check IDs. Multiple values allowed.",
|
||||
),
|
||||
check_title: str | None = Field(
|
||||
default=None,
|
||||
description="Filter by check title using partial matching.",
|
||||
),
|
||||
severity: list[SeverityFilter] = Field(
|
||||
default=[],
|
||||
description="Filter by aggregated severity. Empty returns all severities.",
|
||||
),
|
||||
status: list[StatusFilter] = Field(
|
||||
default=["FAIL"],
|
||||
description="Filter by aggregated status. Default returns failing groups. Pass [] to return all statuses.",
|
||||
),
|
||||
muted: bool | str | None = Field(
|
||||
default=None,
|
||||
description="Filter by fully muted group state. Accepts true/false.",
|
||||
),
|
||||
include_muted: bool | str = Field(
|
||||
default=False,
|
||||
description="When false, excludes fully muted groups. Set true to include fully muted groups.",
|
||||
),
|
||||
delta: list[DeltaFilter] = Field(
|
||||
default=[],
|
||||
description="Filter by group delta values: new or changed.",
|
||||
),
|
||||
date_from: str | None = Field(
|
||||
default=None,
|
||||
description="Start date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
|
||||
),
|
||||
date_to: str | None = Field(
|
||||
default=None,
|
||||
description="End date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
|
||||
),
|
||||
sort: str | None = Field(
|
||||
default=None,
|
||||
description="Optional sort expression supported by the finding-groups API, such as -fail_count,-severity,check_id.",
|
||||
),
|
||||
page_size: int = Field(
|
||||
default=50, description="Number of groups to return per page"
|
||||
),
|
||||
page_number: int = Field(
|
||||
default=1, description="Page number to retrieve (1-indexed)"
|
||||
),
|
||||
) -> dict[str, Any]:
|
||||
"""List finding groups aggregated by check ID.
|
||||
|
||||
Default behavior returns the latest non-muted FAIL groups for fast triage.
|
||||
Without dates this uses `/finding-groups/latest`. With `date_from` or
|
||||
`date_to`, this uses `/finding-groups` with a maximum 2-day date window.
|
||||
|
||||
Use this tool to find noisy or high-impact checks, then call
|
||||
prowler_app_get_finding_group_details for complete counters or
|
||||
prowler_app_list_finding_group_resources to drill into affected resources.
|
||||
"""
|
||||
try:
|
||||
self.api_client.validate_page_size(page_size)
|
||||
date_range, params = self._base_date_params(date_from, date_to)
|
||||
endpoint = self._group_endpoint(date_range)
|
||||
|
||||
self._apply_common_filters(
|
||||
params,
|
||||
provider,
|
||||
provider_type,
|
||||
provider_uid,
|
||||
provider_alias,
|
||||
region,
|
||||
service,
|
||||
resource_type,
|
||||
resource_name,
|
||||
resource_uid,
|
||||
resource_group,
|
||||
category,
|
||||
check_id,
|
||||
check_title,
|
||||
severity,
|
||||
status,
|
||||
muted,
|
||||
delta,
|
||||
)
|
||||
|
||||
params["filter[include_muted]"] = self._bool_value(include_muted)
|
||||
params["page[size]"] = page_size
|
||||
params["page[number]"] = page_number
|
||||
params["fields[finding-groups]"] = GROUP_LIST_FIELDS
|
||||
if sort:
|
||||
params["sort"] = sort
|
||||
|
||||
clean_params = self.api_client.build_filter_params(params)
|
||||
api_response = await self.api_client.get(endpoint, params=clean_params)
|
||||
response = FindingGroupsListResponse.from_api_response(api_response)
|
||||
return response.model_dump()
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error listing finding groups: {e}")
|
||||
return {"error": str(e), "status": "failed"}
|
||||
|
||||
async def get_finding_group_details(
|
||||
self,
|
||||
check_id: str = Field(
|
||||
description="Public check ID that identifies the finding group. This is not a UUID."
|
||||
),
|
||||
date_from: str | None = Field(
|
||||
default=None,
|
||||
description="Start date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
|
||||
),
|
||||
date_to: str | None = Field(
|
||||
default=None,
|
||||
description="End date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
|
||||
),
|
||||
) -> dict[str, Any]:
|
||||
"""Get complete details for one finding group by exact check ID.
|
||||
|
||||
Uses `filter[check_id]` exact matching against latest data by default,
|
||||
or historical data when dates are provided. Fully muted groups are
|
||||
included by default so accepted risk does not look like a missing group.
|
||||
"""
|
||||
try:
|
||||
date_range, params = self._base_date_params(date_from, date_to)
|
||||
endpoint = self._group_endpoint(date_range)
|
||||
|
||||
params.update(
|
||||
{
|
||||
"filter[check_id]": check_id,
|
||||
"filter[include_muted]": True,
|
||||
"page[size]": 1,
|
||||
"page[number]": 1,
|
||||
"fields[finding-groups]": GROUP_DETAIL_FIELDS,
|
||||
}
|
||||
)
|
||||
|
||||
clean_params = self.api_client.build_filter_params(params)
|
||||
api_response = await self.api_client.get(endpoint, params=clean_params)
|
||||
data = api_response.get("data", [])
|
||||
|
||||
if not data:
|
||||
return {
|
||||
"error": f"Finding group '{check_id}' not found.",
|
||||
"status": "not_found",
|
||||
}
|
||||
|
||||
group = DetailedFindingGroup.from_api_response(data[0])
|
||||
return group.model_dump()
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error getting finding group details: {e}")
|
||||
return {"error": str(e), "status": "failed"}
|
||||
|
||||
async def list_finding_group_resources(
|
||||
self,
|
||||
check_id: str = Field(
|
||||
description="Public check ID that identifies the finding group. This is not a UUID."
|
||||
),
|
||||
provider: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by provider UUIDs. Multiple values allowed.",
|
||||
),
|
||||
provider_type: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by provider type. Multiple values allowed.",
|
||||
),
|
||||
provider_uid: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by provider-native account, subscription, or project IDs. Multiple values allowed.",
|
||||
),
|
||||
provider_alias: str | None = Field(
|
||||
default=None,
|
||||
description="Filter by provider alias/name using partial matching.",
|
||||
),
|
||||
region: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by cloud regions. Multiple values allowed.",
|
||||
),
|
||||
service: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by cloud services. Multiple values allowed.",
|
||||
),
|
||||
resource_type: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by resource types. Multiple values allowed.",
|
||||
),
|
||||
resource_name: str | None = Field(
|
||||
default=None,
|
||||
description="Filter by resource name using partial matching.",
|
||||
),
|
||||
resource_uid: str | None = Field(
|
||||
default=None,
|
||||
description="Filter by resource UID using partial matching.",
|
||||
),
|
||||
resource_group: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by resource group values. Multiple values allowed.",
|
||||
),
|
||||
category: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by finding categories. Multiple values allowed.",
|
||||
),
|
||||
severity: list[SeverityFilter] = Field(
|
||||
default=[],
|
||||
description="Filter by severity. Empty returns all severities.",
|
||||
),
|
||||
status: list[StatusFilter] = Field(
|
||||
default=["FAIL"],
|
||||
description="Filter by status. Default returns failing resources. Pass [] to return all statuses.",
|
||||
),
|
||||
muted: bool | str | None = Field(
|
||||
default=None,
|
||||
description="Filter by muted state. Accepts true/false. Overrides include_muted when provided.",
|
||||
),
|
||||
include_muted: bool | str = Field(
|
||||
default=False,
|
||||
description="When false, returns only actionable unmuted resources by applying muted=false. Set true to include muted and unmuted resources.",
|
||||
),
|
||||
delta: list[DeltaFilter] = Field(
|
||||
default=[], description="Filter by delta values: new or changed."
|
||||
),
|
||||
date_from: str | None = Field(
|
||||
default=None,
|
||||
description="Start date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
|
||||
),
|
||||
date_to: str | None = Field(
|
||||
default=None,
|
||||
description="End date for historical query in YYYY-MM-DD format. Maximum range is 2 days.",
|
||||
),
|
||||
sort: str | None = Field(
|
||||
default=None,
|
||||
description="Optional sort expression supported by the finding group resources API.",
|
||||
),
|
||||
page_size: int = Field(
|
||||
default=50, description="Number of resources to return per page"
|
||||
),
|
||||
page_number: int = Field(
|
||||
default=1, description="Page number to retrieve (1-indexed)"
|
||||
),
|
||||
) -> dict[str, Any]:
|
||||
"""List resources affected by a finding group.
|
||||
|
||||
Without dates this uses `/finding-groups/latest/{check_id}/resources`.
|
||||
With `date_from` or `date_to`, this uses
|
||||
`/finding-groups/{check_id}/resources` with a maximum 2-day date window.
|
||||
|
||||
Default behavior returns FAIL, unmuted resources so the result is
|
||||
actionable. Set `include_muted=True` to include accepted/suppressed
|
||||
resources too. Each row includes nested resource and provider data plus
|
||||
`finding_id`. Use `prowler_app_get_finding_details(finding_id)` to
|
||||
retrieve complete remediation guidance for a specific resource finding.
|
||||
"""
|
||||
try:
|
||||
self.api_client.validate_page_size(page_size)
|
||||
date_range, params = self._base_date_params(date_from, date_to)
|
||||
endpoint = self._resource_endpoint(check_id, date_range)
|
||||
|
||||
if muted is None and not self._bool_value(include_muted):
|
||||
muted = False
|
||||
|
||||
self._apply_common_filters(
|
||||
params,
|
||||
provider,
|
||||
provider_type,
|
||||
provider_uid,
|
||||
provider_alias,
|
||||
region,
|
||||
service,
|
||||
resource_type,
|
||||
resource_name,
|
||||
resource_uid,
|
||||
resource_group,
|
||||
category,
|
||||
[],
|
||||
None,
|
||||
severity,
|
||||
status,
|
||||
muted,
|
||||
delta,
|
||||
)
|
||||
|
||||
params["page[size]"] = page_size
|
||||
params["page[number]"] = page_number
|
||||
params["fields[finding-group-resources]"] = RESOURCE_FIELDS
|
||||
if sort:
|
||||
params["sort"] = sort
|
||||
|
||||
clean_params = self.api_client.build_filter_params(params)
|
||||
api_response = await self.api_client.get(endpoint, params=clean_params)
|
||||
response = FindingGroupResourcesListResponse.from_api_response(
|
||||
api_response
|
||||
)
|
||||
return response.model_dump()
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error listing finding group resources: {e}")
|
||||
return {"error": str(e), "status": "failed"}
|
||||
@@ -1,4 +1,4 @@
|
||||
# This file is automatically @generated by Poetry 2.3.2 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 2.3.4 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "about-time"
|
||||
@@ -12,6 +12,19 @@ files = [
|
||||
{file = "about_time-4.2.1-py3-none-any.whl", hash = "sha256:8bbf4c75fe13cbd3d72f49a03b02c5c7dca32169b6d49117c257e7eb3eaee341"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aenum"
|
||||
version = "3.1.17"
|
||||
description = "Advanced Enumerations (compatible with Python's stdlib Enum), NamedTuples, and NamedConstants"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aenum-3.1.17-py2-none-any.whl", hash = "sha256:0dad0421b2fbe30e3fb623b2a0a23eff823407df53829d6a72595e7f76f3d872"},
|
||||
{file = "aenum-3.1.17-py3-none-any.whl", hash = "sha256:8b883a37a04e74cc838ac442bdd28c266eae5bbf13e1342c7ef123ed25230139"},
|
||||
{file = "aenum-3.1.17.tar.gz", hash = "sha256:a969a4516b194895de72c875ece355f17c0d272146f7fda346ef74f93cf4d5ba"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aiofiles"
|
||||
version = "24.1.0"
|
||||
@@ -3144,6 +3157,22 @@ files = [
|
||||
[package.dependencies]
|
||||
referencing = ">=0.31.0"
|
||||
|
||||
[[package]]
|
||||
name = "jwcrypto"
|
||||
version = "1.5.7"
|
||||
description = "Implementation of JOSE Web standards"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "jwcrypto-1.5.7-py3-none-any.whl", hash = "sha256:729463fefe28b6de5cf1ebfda3e94f1a1b41d2799148ef98a01cb9678ebe2bb0"},
|
||||
{file = "jwcrypto-1.5.7.tar.gz", hash = "sha256:70204d7cca406eda8c82352e3c41ba2d946610dafd19e54403f0a1f4f18633c6"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
cryptography = ">=3.4"
|
||||
typing_extensions = ">=4.5.0"
|
||||
|
||||
[[package]]
|
||||
name = "keystoneauth1"
|
||||
version = "5.13.0"
|
||||
@@ -4112,6 +4141,35 @@ urllib3 = {version = ">=2.6.3", markers = "python_version >= \"3.10.0\""}
|
||||
[package.extras]
|
||||
adk = ["docstring-parser (>=0.16) ; python_version >= \"3.10\" and python_version < \"4\"", "mcp (>=1.6.0) ; python_version >= \"3.10\" and python_version < \"4\"", "pydantic (>=2.10.6) ; python_version >= \"3.10\" and python_version < \"4\"", "rich (>=13.9.4) ; python_version >= \"3.10\" and python_version < \"4\""]
|
||||
|
||||
[[package]]
|
||||
name = "okta"
|
||||
version = "3.4.2"
|
||||
description = "Python SDK for the Okta Management API"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "okta-3.4.2-py3-none-any.whl", hash = "sha256:b67bcff31de65223c5848894a202153236d0c99e3a8541a54bf7065f81676637"},
|
||||
{file = "okta-3.4.2.tar.gz", hash = "sha256:b05201056f3f028c5d2d16394f9b47024a689080f5a993c11d4d80f0e1b5ba1e"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
aenum = ">=3.1.16"
|
||||
aiohttp = ">=3.13.4"
|
||||
blinker = ">=1.9.0"
|
||||
jwcrypto = ">=1.5.6"
|
||||
pycryptodomex = ">=3.23.0"
|
||||
pydantic = ">=2.11.3"
|
||||
pydash = ">=8.0.6"
|
||||
PyJWT = ">=2.12.0"
|
||||
python-dateutil = ">=2.9.0.post0"
|
||||
PyYAML = ">=6.0.3"
|
||||
requests = ">=2.33.0"
|
||||
xmltodict = ">=1.0.2"
|
||||
|
||||
[package.extras]
|
||||
images = ["pillow (>=9.0.0,<12)"]
|
||||
|
||||
[[package]]
|
||||
name = "openapi-schema-validator"
|
||||
version = "0.6.3"
|
||||
@@ -4752,6 +4810,57 @@ files = [
|
||||
{file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pycryptodomex"
|
||||
version = "3.23.0"
|
||||
description = "Cryptographic library for Python"
|
||||
optional = false
|
||||
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pycryptodomex-3.23.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:add243d204e125f189819db65eed55e6b4713f70a7e9576c043178656529cec7"},
|
||||
{file = "pycryptodomex-3.23.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1c6d919fc8429e5cb228ba8c0d4d03d202a560b421c14867a65f6042990adc8e"},
|
||||
{file = "pycryptodomex-3.23.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:1c3a65ad441746b250d781910d26b7ed0a396733c6f2dbc3327bd7051ec8a541"},
|
||||
{file = "pycryptodomex-3.23.0-cp27-cp27m-win32.whl", hash = "sha256:47f6d318fe864d02d5e59a20a18834819596c4ed1d3c917801b22b92b3ffa648"},
|
||||
{file = "pycryptodomex-3.23.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:d9825410197a97685d6a1fa2a86196430b01877d64458a20e95d4fd00d739a08"},
|
||||
{file = "pycryptodomex-3.23.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:267a3038f87a8565bd834317dbf053a02055915acf353bf42ededb9edaf72010"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:7b37e08e3871efe2187bc1fd9320cc81d87caf19816c648f24443483005ff886"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:91979028227543010d7b2ba2471cf1d1e398b3f183cb105ac584df0c36dac28d"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8962204c47464d5c1c4038abeadd4514a133b28748bcd9fa5b6d62e3cec6fa"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a33986a0066860f7fcf7c7bd2bc804fa90e434183645595ae7b33d01f3c91ed8"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7947ab8d589e3178da3d7cdeabe14f841b391e17046954f2fbcd941705762b5"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c25e30a20e1b426e1f0fa00131c516f16e474204eee1139d1603e132acffc314"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:da4fa650cef02db88c2b98acc5434461e027dce0ae8c22dd5a69013eaf510006"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:58b851b9effd0d072d4ca2e4542bf2a4abcf13c82a29fd2c93ce27ee2a2e9462"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:a9d446e844f08299236780f2efa9898c818fe7e02f17263866b8550c7d5fb328"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:bc65bdd9fc8de7a35a74cab1c898cab391a4add33a8fe740bda00f5976ca4708"},
|
||||
{file = "pycryptodomex-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:c885da45e70139464f082018ac527fdaad26f1657a99ee13eecdce0f0ca24ab4"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:06698f957fe1ab229a99ba2defeeae1c09af185baa909a31a5d1f9d42b1aaed6"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2c2537863eccef2d41061e82a881dcabb04944c5c06c5aa7110b577cc487545"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43c446e2ba8df8889e0e16f02211c25b4934898384c1ec1ec04d7889c0333587"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f489c4765093fb60e2edafdf223397bc716491b2b69fe74367b70d6999257a5c"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdc69d0d3d989a1029df0eed67cc5e8e5d968f3724f4519bd03e0ec68df7543c"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6bbcb1dd0f646484939e142462d9e532482bc74475cecf9c4903d4e1cd21f003"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:8a4fcd42ccb04c31268d1efeecfccfd1249612b4de6374205376b8f280321744"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:55ccbe27f049743a4caf4f4221b166560d3438d0b1e5ab929e07ae1702a4d6fd"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-win32.whl", hash = "sha256:189afbc87f0b9f158386bf051f720e20fa6145975f1e76369303d0f31d1a8d7c"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:52e5ca58c3a0b0bd5e100a9fbc8015059b05cffc6c66ce9d98b4b45e023443b9"},
|
||||
{file = "pycryptodomex-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:02d87b80778c171445d67e23d1caef279bf4b25c3597050ccd2e13970b57fd51"},
|
||||
{file = "pycryptodomex-3.23.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:febec69c0291efd056c65691b6d9a339f8b4bc43c6635b8699471248fe897fea"},
|
||||
{file = "pycryptodomex-3.23.0-pp27-pypy_73-win32.whl", hash = "sha256:c84b239a1f4ec62e9c789aafe0543f0594f0acd90c8d9e15bcece3efe55eca66"},
|
||||
{file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ebfff755c360d674306e5891c564a274a47953562b42fb74a5c25b8fc1fb1cb5"},
|
||||
{file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eca54f4bb349d45afc17e3011ed4264ef1cc9e266699874cdd1349c504e64798"},
|
||||
{file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2596e643d4365e14d0879dc5aafe6355616c61c2176009270f3048f6d9a61f"},
|
||||
{file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdfac7cda115bca3a5abb2f9e43bc2fb66c2b65ab074913643803ca7083a79ea"},
|
||||
{file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:14c37aaece158d0ace436f76a7bb19093db3b4deade9797abfc39ec6cd6cc2fe"},
|
||||
{file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7de1e40a41a5d7f1ac42b6569b10bcdded34339950945948529067d8426d2785"},
|
||||
{file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bffc92138d75664b6d543984db7893a628559b9e78658563b0395e2a5fb47ed9"},
|
||||
{file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df027262368334552db2c0ce39706b3fb32022d1dce34673d0f9422df004b96a"},
|
||||
{file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e79f1aaff5a3a374e92eb462fa9e598585452135012e2945f96874ca6eeb1ff"},
|
||||
{file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:27e13c80ac9a0a1d050ef0a7e0a18cc04c8850101ec891815b6c5a0375e8a245"},
|
||||
{file = "pycryptodomex-3.23.0.tar.gz", hash = "sha256:71909758f010c82bc99b0abf4ea12012c98962fbf0583c2164f8b84533c2e4da"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pydantic"
|
||||
version = "2.12.5"
|
||||
@@ -4908,6 +5017,24 @@ files = [
|
||||
[package.dependencies]
|
||||
typing-extensions = ">=4.14.1"
|
||||
|
||||
[[package]]
|
||||
name = "pydash"
|
||||
version = "8.0.6"
|
||||
description = "The kitchen sink of Python utility libraries for doing \"stuff\" in a functional way. Based on the Lo-Dash Javascript library."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "pydash-8.0.6-py3-none-any.whl", hash = "sha256:ee70a81a5b292c007f28f03a4ee8e75c1f5d7576df5457b836ec7ab2839cc5d0"},
|
||||
{file = "pydash-8.0.6.tar.gz", hash = "sha256:b2821547e9723f69cf3a986be4db64de41730be149b2641947ecd12e1e11025a"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = ">3.10,<4.6.0 || >4.6.0"
|
||||
|
||||
[package.extras]
|
||||
dev = ["build", "coverage", "furo", "invoke", "mypy", "pytest", "pytest-cov", "pytest-mypy-testing", "ruff", "sphinx", "sphinx-autodoc-typehints", "tox", "twine", "wheel"]
|
||||
|
||||
[[package]]
|
||||
name = "pyflakes"
|
||||
version = "3.2.0"
|
||||
@@ -5240,65 +5367,85 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "pyyaml"
|
||||
version = "6.0.2"
|
||||
version = "6.0.3"
|
||||
description = "YAML parser and emitter for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main", "dev"]
|
||||
files = [
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
|
||||
{file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
|
||||
{file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
|
||||
{file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
|
||||
{file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
|
||||
{file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
|
||||
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
|
||||
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
|
||||
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
|
||||
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
|
||||
{file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
|
||||
{file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
|
||||
{file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
|
||||
{file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
|
||||
{file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"},
|
||||
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"},
|
||||
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"},
|
||||
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"},
|
||||
{file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"},
|
||||
{file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"},
|
||||
{file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e"},
|
||||
{file = "pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b"},
|
||||
{file = "pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b"},
|
||||
{file = "pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c"},
|
||||
{file = "pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9"},
|
||||
{file = "pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0"},
|
||||
{file = "pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007"},
|
||||
{file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6447,16 +6594,19 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "xmltodict"
|
||||
version = "0.14.2"
|
||||
version = "1.0.4"
|
||||
description = "Makes working with XML feel like you are working with JSON"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["dev"]
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main", "dev"]
|
||||
files = [
|
||||
{file = "xmltodict-0.14.2-py2.py3-none-any.whl", hash = "sha256:20cc7d723ed729276e808f26fb6b3599f786cbc37e06c65e192ba77c40f20aac"},
|
||||
{file = "xmltodict-0.14.2.tar.gz", hash = "sha256:201e7c28bb210e374999d1dde6382923ab0ed1a8a5faeece48ab525b7810a553"},
|
||||
{file = "xmltodict-1.0.4-py3-none-any.whl", hash = "sha256:a4a00d300b0e1c59fc2bfccb53d7b2e88c32f200df138a0dd2229f842497026a"},
|
||||
{file = "xmltodict-1.0.4.tar.gz", hash = "sha256:6d94c9f834dd9e44514162799d344d815a3a4faec913717a9ecbfa5be1bb8e61"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
test = ["pytest", "pytest-cov"]
|
||||
|
||||
[[package]]
|
||||
name = "yarl"
|
||||
version = "1.20.1"
|
||||
@@ -6735,4 +6885,4 @@ files = [
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<3.13"
|
||||
content-hash = "d7e2ad41783a864bb845f63ccc10c88ae1e4ac36d61993ea106bbb4a5f58a843"
|
||||
content-hash = "96359a9bfe4031fb0747c22eb4b00f2a008e3fb6d07189fa0fe6ee3875b1f913"
|
||||
|
||||
@@ -9,10 +9,12 @@ All notable changes to the **Prowler SDK** are documented in this file.
|
||||
- `entra_service_principal_no_secrets_for_permanent_tier0_roles` check for M365 provider [(#10788)](https://github.com/prowler-cloud/prowler/pull/10788)
|
||||
- `iam_user_access_not_stale_to_sagemaker` check for AWS provider with configurable `max_unused_sagemaker_access_days` (default 90) [(#11000)](https://github.com/prowler-cloud/prowler/pull/11000)
|
||||
- `cloudtrail_bedrock_logging_enabled` check for AWS provider [(#10858)](https://github.com/prowler-cloud/prowler/pull/10858)
|
||||
- Okta provider with OAuth 2.0 authentication and `signon_global_session_idle_timeout_15min` check [(#11079)](https://github.com/prowler-cloud/prowler/pull/11079)
|
||||
|
||||
### 🔄 Changed
|
||||
|
||||
- `entra_emergency_access_exclusion` check for M365 provider now scopes the exclusion requirement to enabled Conditional Access policies with a `Block` grant control instead of every enabled policy, focusing on the lockout-relevant policy set [(#10849)](https://github.com/prowler-cloud/prowler/pull/10849)
|
||||
- AWS IAM customer-managed policy checks no longer emit `FAIL` on unattached policies unless `--scan-unused-services` is enabled [(#11150)](https://github.com/prowler-cloud/prowler/pull/11150)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -154,6 +154,7 @@ from prowler.providers.llm.models import LLMOutputOptions
|
||||
from prowler.providers.m365.models import M365OutputOptions
|
||||
from prowler.providers.mongodbatlas.models import MongoDBAtlasOutputOptions
|
||||
from prowler.providers.nhn.models import NHNOutputOptions
|
||||
from prowler.providers.okta.models import OktaOutputOptions
|
||||
from prowler.providers.openstack.models import OpenStackOutputOptions
|
||||
from prowler.providers.oraclecloud.models import OCIOutputOptions
|
||||
from prowler.providers.vercel.models import VercelOutputOptions
|
||||
@@ -426,6 +427,10 @@ def prowler():
|
||||
output_options = VercelOutputOptions(
|
||||
args, bulk_checks_metadata, global_provider.identity
|
||||
)
|
||||
elif provider == "okta":
|
||||
output_options = OktaOutputOptions(
|
||||
args, bulk_checks_metadata, global_provider.identity
|
||||
)
|
||||
|
||||
# Run the quick inventory for the provider if available
|
||||
if hasattr(args, "quick_inventory") and args.quick_inventory:
|
||||
|
||||
@@ -76,6 +76,7 @@ class Provider(str, Enum):
|
||||
OPENSTACK = "openstack"
|
||||
IMAGE = "image"
|
||||
VERCEL = "vercel"
|
||||
OKTA = "okta"
|
||||
|
||||
|
||||
# Compliance
|
||||
|
||||
@@ -649,3 +649,11 @@ vercel:
|
||||
- "_PASSWORD"
|
||||
- "_API_KEY"
|
||||
- "_PRIVATE_KEY"
|
||||
|
||||
okta:
|
||||
# Okta Sign-On Policies
|
||||
# okta.signon_global_session_idle_timeout_15min
|
||||
# Maximum acceptable Global Session idle timeout, in minutes. Defaults to
|
||||
# 15 per DISA STIG V-273186 (OKTA-APP-000020); raise it only with an
|
||||
# explicit risk acceptance.
|
||||
okta_max_session_idle_minutes: 15
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
### Account, Check and/or Region can be * to apply for all the cases.
|
||||
### Account == <Okta organization domain, e.g. acme.okta.com>
|
||||
### Bare domain only — no scheme, no path, no trailing slash.
|
||||
### Region is always "*" — Okta has no regional concept.
|
||||
### Resources matches against the policy name (e.g. "Default Policy"), not the id.
|
||||
### Resources and tags are lists that can have either Regex or Keywords.
|
||||
### Tags is an optional list that matches on tuples of 'key=value' and are "ANDed" together.
|
||||
### Use an alternation Regex to match one of multiple tags with "ORed" logic.
|
||||
### For each check you can except Accounts, Regions, Resources and/or Tags.
|
||||
########################### MUTELIST EXAMPLE ###########################
|
||||
Mutelist:
|
||||
Accounts:
|
||||
"acme.okta.com":
|
||||
Checks:
|
||||
"signon_global_session_idle_timeout_15min":
|
||||
Regions:
|
||||
- "*"
|
||||
Resources:
|
||||
- "Default Policy"
|
||||
@@ -745,6 +745,10 @@ def execute(
|
||||
is_finding_muted_args["tenancy_id"] = (
|
||||
global_provider.identity.tenancy_id
|
||||
)
|
||||
elif global_provider.type == "okta":
|
||||
is_finding_muted_args["org_domain"] = (
|
||||
global_provider.identity.org_domain
|
||||
)
|
||||
for finding in check_findings:
|
||||
if global_provider.type == "cloudflare":
|
||||
is_finding_muted_args["account_id"] = finding.account_id
|
||||
|
||||
@@ -933,6 +933,41 @@ class CheckReportGithub(Check_Report):
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class CheckReportOkta(Check_Report):
|
||||
"""Contains the Okta Check's finding information."""
|
||||
|
||||
resource_name: str
|
||||
resource_id: str
|
||||
org_domain: str
|
||||
region: str
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
metadata: Dict,
|
||||
resource: Any,
|
||||
resource_name: str = None,
|
||||
resource_id: str = None,
|
||||
org_domain: str = None,
|
||||
region: str = "global",
|
||||
) -> None:
|
||||
"""Initialize the Okta Check's finding information.
|
||||
|
||||
Args:
|
||||
metadata: The metadata of the check.
|
||||
resource: Basic information about the resource.
|
||||
resource_name: The name of the resource related with the finding.
|
||||
resource_id: The id of the resource related with the finding.
|
||||
org_domain: The Okta organization domain related with the finding.
|
||||
region: Always "global" — Okta has no regional concept.
|
||||
"""
|
||||
super().__init__(metadata, resource)
|
||||
self.resource_name = resource_name or getattr(resource, "name", "")
|
||||
self.resource_id = resource_id or getattr(resource, "id", "")
|
||||
self.org_domain = org_domain or getattr(resource, "org_domain", "")
|
||||
self.region = region
|
||||
|
||||
|
||||
@dataclass
|
||||
class CheckReportGoogleWorkspace(Check_Report):
|
||||
"""Contains the Google Workspace Check's finding information."""
|
||||
|
||||
@@ -29,10 +29,10 @@ class ProwlerArgumentParser:
|
||||
self.parser = argparse.ArgumentParser(
|
||||
prog="prowler",
|
||||
formatter_class=RawTextHelpFormatter,
|
||||
usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel,dashboard,iac,image,llm} ...",
|
||||
usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel,dashboard,iac,image,llm} ...",
|
||||
epilog="""
|
||||
Available Cloud Providers:
|
||||
{aws,azure,gcp,kubernetes,m365,github,googleworkspace,iac,llm,image,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel}
|
||||
{aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,iac,llm,image,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel}
|
||||
aws AWS Provider
|
||||
azure Azure Provider
|
||||
gcp GCP Provider
|
||||
@@ -40,6 +40,7 @@ Available Cloud Providers:
|
||||
m365 Microsoft 365 Provider
|
||||
github GitHub Provider
|
||||
googleworkspace Google Workspace Provider
|
||||
okta Okta Provider
|
||||
cloudflare Cloudflare Provider
|
||||
oraclecloud Oracle Cloud Infrastructure Provider
|
||||
openstack OpenStack Provider
|
||||
|
||||
@@ -427,6 +427,21 @@ class Finding(BaseModel):
|
||||
output_data["resource_uid"] = check_output.resource_id
|
||||
output_data["region"] = "global"
|
||||
|
||||
elif provider.type == "okta":
|
||||
output_data["auth_method"] = provider.auth_method
|
||||
output_data["account_uid"] = get_nested_attribute(
|
||||
provider, "identity.org_domain"
|
||||
)
|
||||
output_data["account_name"] = get_nested_attribute(
|
||||
provider, "identity.org_domain"
|
||||
)
|
||||
output_data["account_organization_uid"] = get_nested_attribute(
|
||||
provider, "identity.client_id"
|
||||
)
|
||||
output_data["resource_name"] = check_output.resource_name
|
||||
output_data["resource_uid"] = check_output.resource_id
|
||||
output_data["region"] = "global"
|
||||
|
||||
elif provider.type == "alibabacloud":
|
||||
output_data["auth_method"] = get_nested_attribute(
|
||||
provider, "identity.identity_arn"
|
||||
|
||||
@@ -1400,6 +1400,56 @@ class HTML(Output):
|
||||
)
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def get_okta_assessment_summary(provider: Provider) -> str:
|
||||
"""
|
||||
get_okta_assessment_summary gets the HTML assessment summary for the Okta provider
|
||||
|
||||
Args:
|
||||
provider (Provider): the Okta provider object
|
||||
|
||||
Returns:
|
||||
str: HTML assessment summary for the Okta provider
|
||||
"""
|
||||
try:
|
||||
assessment_items = f"""
|
||||
<li class="list-group-item">
|
||||
<b>Okta Domain:</b> {provider.identity.org_domain}
|
||||
</li>"""
|
||||
|
||||
credentials_items = f"""
|
||||
<li class="list-group-item">
|
||||
<b>Authentication:</b> {provider.auth_method}
|
||||
</li>
|
||||
<li class="list-group-item">
|
||||
<b>Client ID:</b> {provider.identity.client_id}
|
||||
</li>"""
|
||||
|
||||
return f"""
|
||||
<div class="col-md-2">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
Okta Assessment Summary
|
||||
</div>
|
||||
<ul class="list-group list-group-flush">{assessment_items}
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-4">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
Okta Credentials
|
||||
</div>
|
||||
<ul class="list-group list-group-flush">{credentials_items}
|
||||
</ul>
|
||||
</div>
|
||||
</div>"""
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
|
||||
)
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def get_assessment_summary(provider: Provider) -> str:
|
||||
"""
|
||||
|
||||
@@ -40,6 +40,8 @@ def stdout_report(finding, color, verbose, status, fix):
|
||||
details = finding.location
|
||||
if finding.check_metadata.Provider == "vercel":
|
||||
details = finding.region
|
||||
if finding.check_metadata.Provider == "okta":
|
||||
details = finding.region
|
||||
|
||||
if (verbose or fix) and (not status or finding.status in status):
|
||||
if finding.muted:
|
||||
|
||||
@@ -108,6 +108,9 @@ def display_summary_table(
|
||||
)
|
||||
else:
|
||||
audited_entities = provider.identity.username or "Personal Account"
|
||||
elif provider.type == "okta":
|
||||
entity_type = "Okta Org"
|
||||
audited_entities = provider.identity.org_domain
|
||||
|
||||
# Check if there are findings and that they are not all MANUAL
|
||||
if findings and not all(finding.status == "MANUAL" for finding in findings):
|
||||
|
||||
@@ -16,6 +16,8 @@ class iam_no_custom_policy_permissive_role_assumption(Check):
|
||||
for policy in iam_client.policies.values():
|
||||
# Check only custom policies
|
||||
if policy.type == "Custom":
|
||||
if not policy.attached and not iam_client.provider.scan_unused_services:
|
||||
continue
|
||||
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
|
||||
report.region = iam_client.region
|
||||
report.status = "PASS"
|
||||
|
||||
@@ -11,6 +11,8 @@ class iam_policy_allows_privilege_escalation(Check):
|
||||
|
||||
for policy in iam_client.policies.values():
|
||||
if policy.type == "Custom":
|
||||
if not policy.attached and not iam_client.provider.scan_unused_services:
|
||||
continue
|
||||
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
|
||||
report.region = iam_client.region
|
||||
report.status = "PASS"
|
||||
|
||||
@@ -11,6 +11,8 @@ class iam_policy_no_full_access_to_cloudtrail(Check):
|
||||
for policy in iam_client.policies.values():
|
||||
# Check only custom policies
|
||||
if policy.type == "Custom":
|
||||
if not policy.attached and not iam_client.provider.scan_unused_services:
|
||||
continue
|
||||
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
|
||||
report.region = iam_client.region
|
||||
report.status = "PASS"
|
||||
|
||||
@@ -11,6 +11,8 @@ class iam_policy_no_full_access_to_kms(Check):
|
||||
for policy in iam_client.policies.values():
|
||||
# Check only custom policies
|
||||
if policy.type == "Custom":
|
||||
if not policy.attached and not iam_client.provider.scan_unused_services:
|
||||
continue
|
||||
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
|
||||
report.region = iam_client.region
|
||||
report.status = "PASS"
|
||||
|
||||
@@ -10,6 +10,8 @@ class iam_policy_no_wildcard_marketplace_subscribe(Check):
|
||||
findings = []
|
||||
for policy in iam_client.policies.values():
|
||||
if policy.type == "Custom":
|
||||
if not policy.attached and not iam_client.provider.scan_unused_services:
|
||||
continue
|
||||
report = Check_Report_AWS(metadata=self.metadata(), resource=policy)
|
||||
report.region = iam_client.region
|
||||
report.status = "PASS"
|
||||
|
||||
@@ -403,6 +403,19 @@ class Provider(ABC):
|
||||
mutelist_path=arguments.mutelist_file,
|
||||
fixer_config=fixer_config,
|
||||
)
|
||||
elif "okta" in provider_class_name.lower():
|
||||
provider_class(
|
||||
okta_org_domain=getattr(arguments, "okta_org_domain", ""),
|
||||
okta_client_id=getattr(arguments, "okta_client_id", ""),
|
||||
okta_private_key=getattr(arguments, "okta_private_key", ""),
|
||||
okta_private_key_file=getattr(
|
||||
arguments, "okta_private_key_file", ""
|
||||
),
|
||||
okta_scopes=getattr(arguments, "okta_scopes", None),
|
||||
config_path=arguments.config_file,
|
||||
mutelist_path=arguments.mutelist_file,
|
||||
fixer_config=fixer_config,
|
||||
)
|
||||
|
||||
except TypeError as error:
|
||||
logger.critical(
|
||||
|
||||
@@ -0,0 +1,112 @@
|
||||
from prowler.exceptions.exceptions import ProwlerException
|
||||
|
||||
|
||||
# Exceptions codes from 14000 to 14999 are reserved for Okta exceptions
|
||||
class OktaBaseException(ProwlerException):
    """Base class for Okta Errors."""

    # Exception codes from 14000 to 14999 are reserved for Okta exceptions.
    OKTA_ERROR_CODES = {
        (14000, "OktaEnvironmentVariableError"): {
            "message": "Okta environment variable error",
            "remediation": "Check the Okta environment variables and ensure they are properly set.",
        },
        (14001, "OktaSetUpSessionError"): {
            "message": "Error setting up Okta session",
            "remediation": "Check the OAuth credentials (org URL, client ID, private key, scopes) and ensure they are properly configured.",
        },
        (14002, "OktaSetUpIdentityError"): {
            "message": "Okta identity setup error due to bad credentials",
            "remediation": "Check the OAuth credentials and confirm the service app has been granted the required read scopes.",
        },
        (14003, "OktaInvalidCredentialsError"): {
            "message": "Okta credentials are not valid",
            "remediation": "Check the client ID and private key for the Okta service app.",
        },
        (14004, "OktaInvalidOrgDomainError"): {
            "message": "Okta organization domain is not valid",
            "remediation": "Provide an Okta-managed domain such as <org>.okta.com (or .oktapreview.com / .okta-emea.com / .okta-gov.com / .okta.mil / .okta-miltest.com / .trex-govcloud.com), with no scheme and no trailing slash.",
        },
        (14005, "OktaPrivateKeyFileError"): {
            "message": "Okta private key file could not be read",
            "remediation": "Check the file path and permissions, and ensure the file contains a PEM-encoded RSA key or a JWK JSON document.",
        },
        (14006, "OktaInsufficientPermissionsError"): {
            "message": "Okta service app is missing required scopes",
            "remediation": "Have a Super Admin grant the required *.read scopes to the service app and assign the Read-Only Administrator role.",
        },
    }

    def __init__(self, code, file=None, original_exception=None, message=None):
        """Resolve the (code, class-name) pair against the catalog and
        forward everything to ProwlerException.

        A caller-supplied ``message`` overrides the catalog message; an
        unknown code falls back to a generic entry.
        """
        entry = self.OKTA_ERROR_CODES.get((code, self.__class__.__name__))
        if entry is None:
            # Unknown code: synthesize a generic entry on the fly.
            entry = {
                "message": message or "Unknown Okta error.",
                "remediation": "Check the Okta API documentation for more details.",
            }
        elif message:
            # Override the message without mutating the shared catalog dict.
            entry = {**entry, "message": message}
        super().__init__(
            code=code,
            source="Okta",
            file=file,
            original_exception=original_exception,
            error_info=entry,
        )
|
||||
|
||||
|
||||
class OktaCredentialsError(OktaBaseException):
    """Base class for Okta credentials errors."""

    def __init__(self, code, file=None, original_exception=None, message=None):
        super().__init__(code, file, original_exception, message)


# [14000] Required OKTA_* environment variables / CLI arguments are missing.
class OktaEnvironmentVariableError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14000, file=file, original_exception=original_exception, message=message
        )


# [14001] Generic failure while building the Okta session.
class OktaSetUpSessionError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14001, file=file, original_exception=original_exception, message=message
        )


# [14002] Identity setup failed (bad credentials or missing scopes).
class OktaSetUpIdentityError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14002, file=file, original_exception=original_exception, message=message
        )


# [14003] Okta rejected the supplied client ID / private key.
class OktaInvalidCredentialsError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14003, file=file, original_exception=original_exception, message=message
        )


# [14004] The organization domain is not an accepted Okta-managed domain.
class OktaInvalidOrgDomainError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14004, file=file, original_exception=original_exception, message=message
        )


# [14005] The private key file could not be read (or was empty).
class OktaPrivateKeyFileError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14005, file=file, original_exception=original_exception, message=message
        )


# [14006] The service app lacks the scopes/role needed for the probe.
class OktaInsufficientPermissionsError(OktaCredentialsError):
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14006, file=file, original_exception=original_exception, message=message
        )
|
||||
@@ -0,0 +1,43 @@
|
||||
def init_parser(self):
    """Register the Okta provider sub-command on the main CLI parser.

    Only non-secret settings (org domain, client ID, scopes) are exposed
    as flags. The private-key JWT secret is deliberately env-only and
    must be supplied through the `OKTA_PRIVATE_KEY` or
    `OKTA_PRIVATE_KEY_FILE` environment variable.
    """
    parser = self.subparsers.add_parser(
        "okta", parents=[self.common_providers_parser], help="Okta Provider"
    )
    auth_group = parser.add_argument_group("Authentication")
    auth_group.add_argument(
        "--okta-org-domain",
        nargs="?",
        default=None,
        metavar="OKTA_ORG_DOMAIN",
        help=(
            "Okta organization domain (e.g. acme.okta.com). Must be an "
            "Okta-managed domain (.okta.com / .oktapreview.com / "
            ".okta-emea.com / .okta-gov.com / .okta.mil / "
            ".okta-miltest.com / .trex-govcloud.com), without scheme or path."
        ),
    )
    auth_group.add_argument(
        "--okta-client-id",
        nargs="?",
        default=None,
        metavar="OKTA_CLIENT_ID",
        help="Okta service app Client ID for OAuth 2.0 (private-key JWT)",
    )
    auth_group.add_argument(
        "--okta-scopes",
        nargs="+",
        default=None,
        metavar="OKTA_SCOPES",
        help=(
            "OAuth scopes to request, space-separated "
            "(e.g. okta.policies.read okta.users.read). Defaults to the "
            "read scopes required by the bundled checks."
        ),
    )
|
||||
@@ -0,0 +1,14 @@
|
||||
from prowler.lib.check.models import CheckReportOkta
|
||||
from prowler.lib.mutelist.mutelist import Mutelist
|
||||
from prowler.lib.outputs.utils import unroll_dict, unroll_tags
|
||||
|
||||
|
||||
class OktaMutelist(Mutelist):
    def is_finding_muted(self, finding: CheckReportOkta, org_domain: str) -> bool:
        """Return True when the finding matches a mutelist entry for this org.

        Args:
            finding (CheckReportOkta): the finding to evaluate.
            org_domain (str): the audited Okta organization domain, used as
                the account-level key of the mutelist.

        Returns:
            bool: whether the finding is muted.
        """
        # Okta findings carry no per-resource region, so "*" fills the
        # region slot expected by the generic Mutelist matcher.
        return self.is_muted(
            org_domain,
            finding.check_metadata.CheckID,
            "*",
            finding.resource_name,
            unroll_dict(unroll_tags(finding.resource_tags)),
        )
|
||||
@@ -0,0 +1,34 @@
|
||||
import asyncio
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from okta.client import Client as OktaSDKClient
|
||||
|
||||
from prowler.providers.okta.models import OktaSession
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from prowler.providers.okta.okta_provider import OktaProvider
|
||||
|
||||
|
||||
class OktaService:
    """Base class for Okta service implementations.

    Bridges the asynchronous okta-sdk-python ``Client`` into the
    synchronous service pattern the other Prowler providers follow.
    Access-token refresh happens inside the SDK, so there is no session
    upkeep here.
    """

    def __init__(self, service: str, provider: "OktaProvider"):
        # Keep a handle on the provider plus the pieces checks consume.
        self.service = service
        self.provider = provider
        self.audit_config = provider.audit_config
        self.fixer_config = provider.fixer_config
        self.client = self.__set_client__(provider.session)

    @staticmethod
    def __set_client__(session: OktaSession) -> OktaSDKClient:
        # One SDK client per service instance, built from the shared
        # session-derived configuration.
        sdk_config = session.to_sdk_config()
        return OktaSDKClient(sdk_config)

    @staticmethod
    def _run(coro):
        """Execute an okta-sdk-python coroutine from synchronous code."""
        return asyncio.run(coro)
|
||||
@@ -0,0 +1,48 @@
|
||||
from pydantic import BaseModel
|
||||
|
||||
from prowler.config.config import output_file_timestamp
|
||||
from prowler.providers.common.models import ProviderOutputOptions
|
||||
|
||||
|
||||
class OktaSession(BaseModel):
    # Validated Okta-managed domain, e.g. "acme.okta.com" (no scheme, no path).
    org_domain: str
    # Service-app client ID used for the private-key JWT grant.
    client_id: str
    # OAuth scopes requested for the access token.
    scopes: list[str]
    # Private-key content (PEM or JWK) — never a file path at this point.
    private_key: str

    def to_sdk_config(self) -> dict:
        """Build the okta-sdk-python client configuration dict.

        Shared by the credential probe (OktaProvider.setup_identity) and
        the service-level client (OktaService.__set_client__). Keeping the
        builder in one place stops the two SDK config dicts from drifting.
        The Okta SDK expects a fully-qualified `orgUrl`; we build it from
        the validated domain so user input stays scheme-free.
        DPoP proofs are sent on every token request — required by tenants
        with "Demonstrating Proof of Possession" enabled on the service
        app (or org-wide), harmless on tenants that don't.
        """
        return {
            "orgUrl": f"https://{self.org_domain}",
            "authorizationMode": "PrivateKey",
            "clientId": self.client_id,
            "scopes": self.scopes,
            "privateKey": self.private_key,
            "dpopEnabled": True,
        }
|
||||
|
||||
|
||||
class OktaIdentityInfo(BaseModel):
    # Service apps have no human identity: the org domain plus the
    # service-app client ID is the closest stable equivalent.
    org_domain: str
    client_id: str
|
||||
|
||||
|
||||
class OktaOutputOptions(ProviderOutputOptions):
    def __init__(self, arguments, bulk_checks_metadata, identity):
        """Derive Okta-specific output options.

        Delegates the common handling to ProviderOutputOptions, then
        defaults the output filename to an org-domain-stamped name when
        the caller did not supply one.
        """
        super().__init__(arguments, bulk_checks_metadata)
        # getattr with a None default covers both "attribute absent" and
        # "attribute explicitly None" — same condition as before.
        filename = getattr(arguments, "output_filename", None)
        if filename is None:
            filename = (
                f"prowler-output-{identity.org_domain}-{output_file_timestamp}"
            )
        self.output_filename = filename
|
||||
@@ -0,0 +1,375 @@
|
||||
import asyncio
|
||||
import os
|
||||
import re
|
||||
from os import environ
|
||||
from typing import Optional, Union
|
||||
|
||||
from colorama import Fore, Style
|
||||
from okta.client import Client as OktaSDKClient
|
||||
|
||||
from prowler.config.config import (
|
||||
default_config_file_path,
|
||||
get_default_mute_file_path,
|
||||
load_and_validate_config_file,
|
||||
)
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.lib.mutelist.mutelist import Mutelist
|
||||
from prowler.lib.utils.utils import print_boxes
|
||||
from prowler.providers.common.models import Audit_Metadata, Connection
|
||||
from prowler.providers.common.provider import Provider
|
||||
from prowler.providers.okta.exceptions.exceptions import (
|
||||
OktaEnvironmentVariableError,
|
||||
OktaInsufficientPermissionsError,
|
||||
OktaInvalidCredentialsError,
|
||||
OktaInvalidOrgDomainError,
|
||||
OktaPrivateKeyFileError,
|
||||
OktaSetUpIdentityError,
|
||||
OktaSetUpSessionError,
|
||||
)
|
||||
from prowler.providers.okta.lib.mutelist.mutelist import OktaMutelist
|
||||
from prowler.providers.okta.models import OktaIdentityInfo, OktaSession
|
||||
|
||||
# OAuth scopes requested when the caller supplies none; covers the policy
# reads used by the bundled signon checks.
DEFAULT_SCOPES = ["okta.policies.read"]
# Accept only Okta-managed domains. Custom (vanity) domains are rejected on
# purpose — they're a recurring source of typos and silent misconfig and
# Prowler's audience overwhelmingly uses Okta-managed hosts. The TLDs below
# match the set the Okta SDK whitelists in `okta.config.config_validator`,
# which includes the commercial, preview, EMEA and US gov/mil environments.
# If a customer with a custom domain shows up, lift this guard behind an
# explicit opt-in.
ORG_DOMAIN_RE = re.compile(
    r"^[a-z0-9][a-z0-9-]*\.("
    r"okta\.com|oktapreview\.com|okta-emea\.com|"
    r"okta-gov\.com|okta\.mil|okta-miltest\.com|trex-govcloud\.com"
    r")$"
)
|
||||
|
||||
|
||||
class OktaProvider(Provider):
    """Okta Provider class.

    Authenticates against an Okta organization using OAuth 2.0 with a
    private-key JWT (Client Credentials grant). The SDK requests and
    refreshes the access token internally.

    Attributes:
        _type (str): The type of the provider.
        _auth_method (str): The authentication method used by the provider.
        _session (OktaSession): The session object for the provider.
        _identity (OktaIdentityInfo): The identity information for the provider.
        _audit_config (dict): The audit configuration for the provider.
        _fixer_config (dict): The fixer configuration for the provider.
        _mutelist (Mutelist): The mutelist for the provider.
        audit_metadata (Audit_Metadata): The audit metadata for the provider.
    """

    _type: str = "okta"
    _auth_method: str = None
    _session: OktaSession
    _identity: OktaIdentityInfo
    _audit_config: dict
    _fixer_config: dict
    _mutelist: Mutelist
    audit_metadata: Audit_Metadata

    def __init__(
        self,
        okta_org_domain: str = "",
        okta_client_id: str = "",
        okta_private_key: str = "",
        okta_private_key_file: str = "",
        okta_scopes: Optional[Union[str, list[str]]] = None,
        config_path: str = None,
        config_content: dict = None,
        fixer_config: dict = {},
        mutelist_path: str = None,
        mutelist_content: dict = None,
    ):
        """Okta Provider constructor.

        Credentials may come from the arguments or from the matching
        OKTA_* environment variables (env fallback is resolved inside
        validate_arguments and setup_session). On success the instance is
        registered as the global provider.
        """
        logger.info("Instantiating Okta Provider...")

        # Fail fast with one combined error if any required credential is
        # missing (CLI argument or environment variable).
        OktaProvider.validate_arguments(
            okta_org_domain=okta_org_domain,
            okta_client_id=okta_client_id,
            okta_private_key=okta_private_key,
            okta_private_key_file=okta_private_key_file,
        )
        self._session = OktaProvider.setup_session(
            org_domain=okta_org_domain,
            client_id=okta_client_id,
            private_key=okta_private_key,
            private_key_file=okta_private_key_file,
            scopes=okta_scopes,
        )
        # setup_identity also performs a live credential probe against Okta.
        self._identity = OktaProvider.setup_identity(self._session)
        self._auth_method = "OAuth 2.0 (private-key JWT)"

        # In-memory config content takes precedence over a config file path.
        if config_content:
            self._audit_config = config_content
        else:
            if not config_path:
                config_path = default_config_file_path
            self._audit_config = load_and_validate_config_file(self._type, config_path)
        self._fixer_config = fixer_config

        # Same precedence for the mutelist: content over path.
        if mutelist_content:
            self._mutelist = OktaMutelist(mutelist_content=mutelist_content)
        else:
            if not mutelist_path:
                mutelist_path = get_default_mute_file_path(self.type)
            self._mutelist = OktaMutelist(mutelist_path=mutelist_path)

        # Register this instance so checks/services can retrieve it via
        # Provider.get_global_provider().
        Provider.set_global_provider(self)
|
||||
|
||||
@property
def auth_method(self):
    # Human-readable auth description; set in __init__ to
    # "OAuth 2.0 (private-key JWT)".
    return self._auth_method

@property
def session(self):
    # OktaSession holding the validated OAuth configuration.
    return self._session

@property
def identity(self):
    # OktaIdentityInfo: org domain + service-app client ID.
    return self._identity

@property
def type(self):
    # Provider type discriminator: "okta".
    return self._type

@property
def audit_config(self):
    return self._audit_config

@property
def fixer_config(self):
    return self._fixer_config

@property
def mutelist(self) -> OktaMutelist:
    return self._mutelist
|
||||
|
||||
@staticmethod
def validate_arguments(
    okta_org_domain: str = "",
    okta_client_id: str = "",
    okta_private_key: str = "",
    okta_private_key_file: str = "",
):
    """Validate that all required OAuth credentials are provided.

    Each argument falls back to its matching `OKTA_*` environment
    variable when empty. The private key may arrive as raw content
    (preferred for API/UI integrations) or as a file path. All missing
    values are reported in a single combined error.

    Raises:
        OktaEnvironmentVariableError: when any required value is absent.
    """
    resolved_org = okta_org_domain or environ.get("OKTA_ORG_DOMAIN", "")
    resolved_client = okta_client_id or environ.get("OKTA_CLIENT_ID", "")
    resolved_key = okta_private_key or environ.get("OKTA_PRIVATE_KEY", "")
    resolved_key_file = okta_private_key_file or environ.get(
        "OKTA_PRIVATE_KEY_FILE", ""
    )

    missing_parts = []
    if not resolved_org:
        missing_parts.append("--okta-org-domain / OKTA_ORG_DOMAIN")
    if not resolved_client:
        missing_parts.append("--okta-client-id / OKTA_CLIENT_ID")
    if not (resolved_key or resolved_key_file):
        missing_parts.append("OKTA_PRIVATE_KEY (or OKTA_PRIVATE_KEY_FILE)")

    if missing_parts:
        raise OktaEnvironmentVariableError(
            file=os.path.basename(__file__),
            message=(
                "Okta provider requires all OAuth credentials. Missing: "
                + ", ".join(missing_parts)
            ),
        )
|
||||
|
||||
@staticmethod
def setup_session(
    org_domain: str = "",
    client_id: str = "",
    private_key: str = "",
    private_key_file: str = "",
    scopes: Optional[Union[str, list[str]]] = None,
) -> OktaSession:
    """Build an OktaSession from CLI args, falling back to environment variables.

    Accepts the private key as raw content (`private_key` /
    `OKTA_PRIVATE_KEY`) or as a file path (`private_key_file` /
    `OKTA_PRIVATE_KEY_FILE`). Content takes precedence when both are
    supplied — this matches the GitHub provider pattern and keeps the
    API/UI integrations from having to write keys to disk.

    Raises:
        OktaInvalidOrgDomainError: when the domain is not Okta-managed.
        OktaPrivateKeyFileError: when the key file is unreadable or empty.
        OktaSetUpSessionError: wrapping any other unexpected failure.
    """
    try:
        # CLI arguments win; environment variables are the fallback.
        org_domain = org_domain or environ.get("OKTA_ORG_DOMAIN", "")
        client_id = client_id or environ.get("OKTA_CLIENT_ID", "")
        private_key = private_key or environ.get("OKTA_PRIVATE_KEY", "")
        private_key_file = private_key_file or environ.get(
            "OKTA_PRIVATE_KEY_FILE", ""
        )
        if not scopes:
            scopes = environ.get("OKTA_SCOPES", "")

        # Normalize before matching: the whitelist regex is lowercase-only.
        org_domain = org_domain.strip().lower()
        if not ORG_DOMAIN_RE.match(org_domain):
            raise OktaInvalidOrgDomainError(
                file=os.path.basename(__file__),
                message=(
                    f"Invalid Okta org domain: '{org_domain}'. Expected "
                    "an Okta-managed domain such as <org>.okta.com "
                    "(or .oktapreview.com / .okta-emea.com / "
                    ".okta-gov.com / .okta.mil / .okta-miltest.com / "
                    ".trex-govcloud.com), with no scheme and no path."
                ),
            )

        # Raw key content takes precedence over a key-file path.
        if private_key:
            private_key = private_key.strip()
        else:
            try:
                with open(private_key_file, "r") as fh:
                    private_key = fh.read().strip()
            except OSError as error:
                raise OktaPrivateKeyFileError(
                    file=os.path.basename(__file__),
                    original_exception=error,
                    message=f"Could not read private key file '{private_key_file}': {error}",
                )
        # Either path can still yield an empty key (blank env var or
        # empty file); reject it explicitly with a path-aware message.
        if not private_key:
            raise OktaPrivateKeyFileError(
                file=os.path.basename(__file__),
                message=(
                    f"Private key file '{private_key_file}' is empty."
                    if private_key_file
                    else "Private key content is empty."
                ),
            )

        # Accept either a CSV string (from env var / legacy callers) or
        # a list[str] (from programmatic callers and the CLI's nargs="+").
        # List elements may themselves contain commas (e.g. "a,b") and
        # are flattened to support mixed input.
        if isinstance(scopes, str):
            raw_items = scopes.split(",")
        elif isinstance(scopes, list):
            raw_items = [item for s in scopes for item in str(s).split(",")]
        else:
            raw_items = []
        scope_list = [s.strip() for s in raw_items if s and s.strip()]
        if not scope_list:
            # Copy so callers cannot mutate the module-level default list.
            scope_list = list(DEFAULT_SCOPES)

        return OktaSession(
            org_domain=org_domain,
            client_id=client_id,
            scopes=scope_list,
            private_key=private_key,
        )

    except (OktaInvalidOrgDomainError, OktaPrivateKeyFileError):
        # Already-specific errors pass through untouched.
        raise
    except Exception as error:
        # Anything else is unexpected: log and wrap in the generic
        # session-setup error.
        logger.critical(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
        raise OktaSetUpSessionError(original_exception=error)
|
||||
|
||||
@staticmethod
def setup_identity(session: OktaSession) -> OktaIdentityInfo:
    """Synthesize identity from the session and verify credentials.

    Service apps don't represent a human user, so the identity is the
    org URL plus the service-app client ID. We still hit the cheapest
    scope-covered endpoint (`list_policies` with limit=1) to fail loud
    when credentials, scopes, or the granted admin role are wrong.

    Raises:
        OktaInsufficientPermissionsError: probe failed with a
            scope/role-related error.
        OktaInvalidCredentialsError: probe failed for any other reason.
        OktaSetUpIdentityError: wrapping any unexpected failure.
    """

    async def _probe():
        # Throwaway client used only for this one credential check.
        client = OktaSDKClient(session.to_sdk_config())
        return await client.list_policies(type="OKTA_SIGN_ON", limit="1")

    try:
        result = asyncio.run(_probe())
        # SDK returns (items, resp, err) on the normal path and (items, err)
        # only on early request-creation errors. The error is always last.
        err = result[-1]
        if err is not None:
            err_text = str(err).lower()
            # Distinguish scope/role failures from generic credential
            # failures — different remediation paths in the docs.
            permission_signals = (
                "invalid_scope",
                "forbidden",
                "not authorized",
                "permission",
            )
            if any(signal in err_text for signal in permission_signals):
                raise OktaInsufficientPermissionsError(
                    file=os.path.basename(__file__),
                    message=(
                        "Okta rejected the credential probe with a "
                        f"permission-related error: {err}"
                    ),
                )
            raise OktaInvalidCredentialsError(
                file=os.path.basename(__file__),
                message=f"Failed to authenticate against Okta: {err}",
            )
        return OktaIdentityInfo(
            org_domain=session.org_domain,
            client_id=session.client_id,
        )
    except (OktaInvalidCredentialsError, OktaInsufficientPermissionsError):
        # Keep the specific errors; only unexpected failures get wrapped.
        raise
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
        raise OktaSetUpIdentityError(original_exception=error)
|
||||
|
||||
def print_credentials(self):
    """Print the Okta credentials currently in use as a boxed report."""
    title = f"{Style.BRIGHT}Using the Okta credentials below:{Style.RESET_ALL}"
    lines = [
        f"Okta Domain: {Fore.YELLOW}{self.identity.org_domain}{Style.RESET_ALL}",
        f"Okta Client ID: {Fore.YELLOW}{self.identity.client_id}{Style.RESET_ALL}",
        f"Authentication Method: {Fore.YELLOW}{self.auth_method}{Style.RESET_ALL}",
    ]
    print_boxes(lines, title)
|
||||
|
||||
@staticmethod
def test_connection(
    okta_org_domain: str = "",
    okta_client_id: str = "",
    okta_private_key: str = "",
    okta_private_key_file: str = "",
    okta_scopes: Optional[Union[str, list[str]]] = None,
    raise_on_exception: bool = True,
) -> Connection:
    """Test the connection to Okta with the provided OAuth credentials.

    Runs the same validate -> session -> identity pipeline as the
    constructor, but without registering a global provider.

    Args:
        okta_org_domain (str): Okta organization domain (env fallback
            handled inside validate_arguments / setup_session).
        okta_client_id (str): service-app client ID.
        okta_private_key (str): raw private-key content.
        okta_private_key_file (str): path to a private-key file.
        okta_scopes: OAuth scopes as CSV string or list.
        raise_on_exception (bool): re-raise failures instead of
            returning them wrapped in a Connection.

    Returns:
        Connection: is_connected=True on success; on failure, the error
        is returned in the Connection when raise_on_exception is False.
    """
    try:
        OktaProvider.validate_arguments(
            okta_org_domain=okta_org_domain,
            okta_client_id=okta_client_id,
            okta_private_key=okta_private_key,
            okta_private_key_file=okta_private_key_file,
        )
        session = OktaProvider.setup_session(
            org_domain=okta_org_domain,
            client_id=okta_client_id,
            private_key=okta_private_key,
            private_key_file=okta_private_key_file,
            scopes=okta_scopes,
        )
        # The identity probe performs the actual live credential check.
        OktaProvider.setup_identity(session)
        return Connection(is_connected=True)
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
        if raise_on_exception:
            # Bare `raise` re-raises the active exception with its
            # original traceback intact (`raise error` rebuilds it).
            raise
        return Connection(error=error)
|
||||
@@ -0,0 +1,4 @@
|
||||
from prowler.providers.common.provider import Provider
from prowler.providers.okta.services.signon.signon_service import Signon

# Module-level singleton: instantiated once at import time against the
# globally registered provider, then shared by every signon check.
signon_client = Signon(Provider.get_global_provider())
|
||||
@@ -0,0 +1,37 @@
|
||||
{
|
||||
"Provider": "okta",
|
||||
"CheckID": "signon_global_session_idle_timeout_15min",
|
||||
"CheckTitle": "Default Global Session Policy has a Priority 1 non-default rule enforcing 15-minute idle timeout",
|
||||
"CheckType": [],
|
||||
"ServiceName": "signon",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "NotDefined",
|
||||
"ResourceGroup": "governance",
|
||||
"Description": "The **Default Global Session Policy** must have a **Priority 1** rule that is **not** the built-in `Default Rule`, and that rule must set **Maximum Okta global session idle time** to 15 minutes or less. The threshold defaults to 15 minutes and is overridable via the `okta_max_session_idle_minutes` key in the audit config.",
|
||||
"Risk": "Without a 15-minute idle timeout, an unattended workstation leaves an authenticated Okta session open indefinitely, allowing an attacker physical or remote access to take over the user's identity and pivot into every downstream application that trusts Okta SSO.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://help.okta.com/oie/en-us/content/topics/identity-engine/policies/about-okta-sign-on-policies.htm",
|
||||
"https://developer.okta.com/docs/api/openapi/okta-management/management/tag/Policy/"
|
||||
],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Sign in to the Okta Admin Console as a Super Admin\n2. Go to Security > Global Session Policy\n3. Open the Default Policy\n4. Add or edit a non-default rule\n5. Move that rule to Priority 1 so it is evaluated before the built-in Default Rule\n6. Set 'Maximum Okta global session idle time' to 15 minutes or less\n7. Save the rule",
|
||||
"Terraform": "resource \"okta_policy_rule_signon\" \"prowler_idle_timeout_15min\" {\n policy_id = okta_policy_signon.default.id\n name = \"Prowler-enforced idle timeout\"\n status = \"ACTIVE\"\n session_idle = 15\n session_persistent = false\n}\n"
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Configure the Default Global Session Policy so its Priority 1 non-default rule sets the Maximum Okta global session idle time to 15 minutes or less.",
|
||||
"Url": "https://hub.prowler.com/check/signon_global_session_idle_timeout_15min"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"identity-access"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": ""
|
||||
}
|
||||
@@ -0,0 +1,126 @@
|
||||
from prowler.lib.check.models import Check, CheckReportOkta
|
||||
from prowler.providers.okta.services.signon.signon_client import signon_client
|
||||
from prowler.providers.okta.services.signon.signon_service import GlobalSessionPolicy
|
||||
|
||||
DEFAULT_THRESHOLD_MINUTES = 15
|
||||
|
||||
|
||||
class signon_global_session_idle_timeout_15min(Check):
    """STIG V-273186 / OKTA-APP-000020.

    The DISA STIG requires the Okta Default Policy to have an active
    Priority 1 rule that is not the built-in Default Rule, and that
    rule must set the maximum Okta global session idle time to the
    configured threshold or lower (defaults to 15 minutes per STIG;
    override via `okta_max_session_idle_minutes` in the audit config).
    """

    def execute(self) -> list[CheckReportOkta]:
        """Run the check and return exactly one PASS/FAIL report.

        Guards are ordered from coarsest to finest: missing policy ->
        inactive policy -> no active rules -> wrong priority -> default
        rule at Priority 1 -> missing timeout -> threshold comparison.
        Each failing guard returns immediately with a targeted message.
        """
        # audit_config can be None when no configuration file is loaded.
        audit_config = signon_client.audit_config or {}
        threshold = audit_config.get(
            "okta_max_session_idle_minutes", DEFAULT_THRESHOLD_MINUTES
        )
        org_domain = signon_client.provider.identity.org_domain
        policy = self._get_default_policy()
        report = CheckReportOkta(
            metadata=self.metadata(), resource=policy, org_domain=org_domain
        )

        # Sentinel id produced by _get_default_policy() when the tenant
        # has no default policy at all.
        if policy.id == "default-policy-missing":
            report.status = "FAIL"
            report.status_extended = (
                "Default Global Session Policy was not found. STIG V-273186 "
                "requires the Default Policy to contain an active Priority 1 "
                f"non-default rule with idle timeout <= {threshold} minutes."
            )
            return [report]

        # An empty status string is tolerated; only an explicit non-ACTIVE
        # status fails here.
        if policy.status and policy.status.upper() != "ACTIVE":
            report.status = "FAIL"
            report.status_extended = (
                f"Default Global Session Policy '{policy.name}' is in "
                f"status '{policy.status}'. STIG V-273186 requires an active "
                "Default Policy with an active Priority 1 non-default rule."
            )
            return [report]

        # Keep only ACTIVE rules (empty status treated as active), ordered
        # by priority; rules without a priority sort last, name breaks ties.
        active_rules = sorted(
            [
                rule
                for rule in policy.rules
                if not rule.status or rule.status.upper() == "ACTIVE"
            ],
            key=lambda rule: (
                rule.priority if rule.priority is not None else float("inf"),
                rule.name,
            ),
        )
        if not active_rules:
            report.status = "FAIL"
            report.status_extended = (
                f"Default Global Session Policy '{policy.name}' has no active "
                "rules. STIG V-273186 requires an active Priority 1 non-default "
                f"rule with idle timeout <= {threshold} minutes."
            )
            return [report]

        priority_one_rule = active_rules[0]
        # The best-ranked active rule must literally carry priority 1
        # (a None priority also fails this comparison).
        if priority_one_rule.priority != 1:
            report.status = "FAIL"
            report.status_extended = (
                f"Default Global Session Policy '{policy.name}' has no active "
                f"Priority 1 rule. The first active rule is '{priority_one_rule.name}' "
                f"at priority {priority_one_rule.priority}."
            )
            return [report]

        # The built-in Default Rule must not be the rule enforcing the timeout.
        if priority_one_rule.is_default or priority_one_rule.name == "Default Rule":
            report.status = "FAIL"
            report.status_extended = (
                f"Default Global Session Policy '{policy.name}' uses "
                f"'{priority_one_rule.name}' as its active Priority 1 rule. "
                "The STIG requires a non-default Priority 1 rule."
            )
            return [report]

        idle_timeout = priority_one_rule.max_session_idle_minutes
        if idle_timeout is None:
            report.status = "FAIL"
            report.status_extended = (
                f"Priority 1 non-default rule '{priority_one_rule.name}' in "
                f"Default Global Session Policy '{policy.name}' does not define "
                "a maximum Okta global session idle time."
            )
            return [report]

        if idle_timeout <= threshold:
            report.status = "PASS"
            report.status_extended = (
                f"Priority 1 non-default rule '{priority_one_rule.name}' in "
                f"Default Global Session Policy '{policy.name}' sets the "
                f"maximum Okta global session idle time to {idle_timeout} "
                f"minutes, meeting the configured threshold of {threshold} minutes."
            )
        else:
            report.status = "FAIL"
            report.status_extended = (
                f"Priority 1 non-default rule '{priority_one_rule.name}' in "
                f"Default Global Session Policy '{policy.name}' sets the "
                f"maximum Okta global session idle time to {idle_timeout} "
                f"minutes, exceeding the configured threshold of {threshold} minutes."
            )
        return [report]

    @staticmethod
    def _get_default_policy() -> GlobalSessionPolicy:
        """Return the tenant's default policy, or a MISSING placeholder.

        A policy counts as default when the service flagged it as such
        (system=True on the API object) or when it carries the literal
        name 'Default Policy'.
        """
        for policy in signon_client.global_session_policies.values():
            if policy.is_default or policy.name == "Default Policy":
                return policy
        # Sentinel consumed by execute(); the id is matched there verbatim.
        return GlobalSessionPolicy(
            id="default-policy-missing",
            name="Default Policy",
            priority=1,
            status="MISSING",
            is_default=True,
            rules=[],
        )
|
||||
@@ -0,0 +1,178 @@
|
||||
from typing import Optional
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.providers.okta.lib.service.service import OktaService
|
||||
|
||||
|
||||
def _next_after_cursor(resp) -> Optional[str]:
|
||||
"""Extract the `after` cursor from a `Link: ...; rel="next"` header.
|
||||
|
||||
Returns None when there is no next page. Header format follows RFC 5988
|
||||
and Okta's pagination guide.
|
||||
"""
|
||||
if resp is None:
|
||||
return None
|
||||
headers = getattr(resp, "headers", None) or {}
|
||||
link = headers.get("link") or headers.get("Link") or ""
|
||||
if not link:
|
||||
return None
|
||||
for part in link.split(","):
|
||||
if 'rel="next"' not in part:
|
||||
continue
|
||||
url_segment = part.split(";", 1)[0].strip().lstrip("<").rstrip(">")
|
||||
cursor = parse_qs(urlparse(url_segment).query).get("after", [None])[0]
|
||||
if cursor:
|
||||
return cursor
|
||||
return None
|
||||
|
||||
|
||||
class Signon(OktaService):
    """Fetches OKTA_SIGN_ON policies and their rules.

    Populates `self.global_session_policies` keyed by policy id. Each
    policy carries its rules; downstream checks read directly from this
    structure.
    """

    def __init__(self, provider):
        super().__init__(__class__.__name__, provider)
        # Eagerly hydrate all policies (and their rules) at construction,
        # so checks only read from memory.
        self.global_session_policies: dict[str, GlobalSessionPolicy] = (
            self._list_global_session_policies()
        )

    def _list_global_session_policies(self) -> dict:
        """Synchronous wrapper around the async fetch; never raises.

        Returns:
            dict mapping policy id -> GlobalSessionPolicy; empty on error.
        """
        logger.info("Signon - Listing OKTA_SIGN_ON policies and rules...")
        try:
            # NOTE(review): _run appears to be an event-loop runner provided
            # by OktaService — confirm against the base class.
            return self._run(self._fetch_all())
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            return {}

    async def _fetch_all(self) -> dict:
        """Fetch every OKTA_SIGN_ON policy, then each policy's rules."""
        result: dict[str, GlobalSessionPolicy] = {}
        all_policies, err = await self._paginate(
            lambda after: self.client.list_policies(type="OKTA_SIGN_ON", after=after)
        )
        if err is not None:
            logger.error(f"Error listing OKTA_SIGN_ON policies: {err}")
            return result

        for policy in all_policies:
            rules = await self._fetch_rules(policy.id)
            # getattr defaults guard against partially-populated SDK models.
            result[policy.id] = GlobalSessionPolicy(
                id=policy.id,
                name=getattr(policy, "name", "") or "",
                priority=getattr(policy, "priority", None),
                status=getattr(policy, "status", "") or "",
                # Okta flags the built-in Default Policy with system=True.
                is_default=bool(getattr(policy, "system", False)),
                rules=rules,
            )
        return result

    async def _fetch_rules(self, policy_id: str) -> list:
        """Fetch one policy's rules and flatten them into pydantic models.

        Returns:
            list[GlobalSessionPolicyRule]; empty when the API call errors.
        """
        # Okta's `list_policy_rules` endpoint does not expose an `after`
        # cursor in the SDK signature, so we call once with a generous
        # `limit`. Tenants with more rules per policy than the limit would
        # silently truncate; this is rare (most policies have <10 rules).
        rule_fetch_limit = 100
        rules_out: list[GlobalSessionPolicyRule] = []
        result = await self.client.list_policy_rules(
            policy_id, limit=str(rule_fetch_limit)
        )
        # SDK convention: the error is the last element of the result tuple.
        err = result[-1]
        if err is not None:
            logger.error(f"Error listing rules for policy {policy_id}: {err}")
            return rules_out
        all_rules = list(result[0] or [])
        if len(all_rules) >= rule_fetch_limit:
            # Surface (rather than hide) possible truncation at the limit.
            logger.warning(
                f"Policy {policy_id} returned {len(all_rules)} rules — the "
                f"per-policy fetch limit ({rule_fetch_limit}) was hit; any "
                "rules beyond this limit are not evaluated by Prowler. "
                "Review the policy in the Okta Admin Console."
            )

        for rule in all_rules:
            # Drill into rule.actions.signon.session and
            # rule.conditions.network, tolerating any missing intermediate.
            actions = getattr(rule, "actions", None)
            signon = getattr(actions, "signon", None) if actions else None
            session = getattr(signon, "session", None) if signon else None
            conditions = getattr(rule, "conditions", None)
            network = getattr(conditions, "network", None) if conditions else None
            rules_out.append(
                GlobalSessionPolicyRule(
                    id=getattr(rule, "id", "") or "",
                    name=getattr(rule, "name", "") or "",
                    priority=getattr(rule, "priority", None),
                    status=getattr(rule, "status", "") or "",
                    # Okta flags the built-in Default Rule with system=True.
                    is_default=bool(getattr(rule, "system", False)),
                    max_session_idle_minutes=getattr(
                        session, "max_session_idle_minutes", None
                    ),
                    max_session_lifetime_minutes=getattr(
                        session, "max_session_lifetime_minutes", None
                    ),
                    use_persistent_cookie=getattr(
                        session, "use_persistent_cookie", None
                    ),
                    network_zones_include=list(getattr(network, "include", None) or []),
                    network_zones_exclude=list(getattr(network, "exclude", None) or []),
                )
            )
        return rules_out

    @staticmethod
    async def _paginate(fetch):
        """Drain all pages of an SDK list call.

        `fetch` is a callable that takes the `after` cursor (or None for
        the first page) and returns the SDK's standard `(items, resp, err)`
        tuple. We follow `Link: rel="next"` headers until exhausted.

        Returns:
            (all_items, err): err is None on success; on a mid-pagination
            failure the items collected so far are still returned.
        """
        all_items = []
        result = await fetch(None)
        # Defensive against the SDK's 2-tuple early-error path: error is last.
        err = result[-1]
        if err is not None:
            return [], err
        items = result[0]
        # Only a 3-tuple carries the raw response needed for Link parsing.
        resp = result[1] if len(result) >= 3 else None
        all_items.extend(items or [])
        while True:
            cursor = _next_after_cursor(resp)
            if not cursor:
                break
            result = await fetch(cursor)
            err = result[-1]
            if err is not None:
                return all_items, err
            items = result[0]
            resp = result[1] if len(result) >= 3 else None
            all_items.extend(items or [])
        return all_items, None
|
||||
|
||||
|
||||
class GlobalSessionPolicyRule(BaseModel):
    """One rule of an OKTA_SIGN_ON policy, flattened for the checks."""

    id: str
    name: str
    # Lower number = evaluated first; None when the API omits it.
    priority: Optional[int] = None
    status: str = ""
    # True for Okta's built-in Default Rule (mapped from system=True).
    is_default: bool = False
    # Session limits in minutes; None when the rule does not define them.
    max_session_idle_minutes: Optional[int] = None
    max_session_lifetime_minutes: Optional[int] = None
    use_persistent_cookie: Optional[bool] = None
    # Network-zone ids from rule.conditions.network; empty when unset.
    # Mutable defaults are safe here: pydantic copies field defaults
    # per instance.
    network_zones_include: list[str] = []
    network_zones_exclude: list[str] = []
|
||||
|
||||
|
||||
class GlobalSessionPolicy(BaseModel):
    """An OKTA_SIGN_ON policy together with its flattened rules."""

    id: str
    name: str
    priority: Optional[int] = None
    status: str = ""
    # True for the tenant's built-in Default Policy (mapped from system=True).
    is_default: bool = False
    rules: list[GlobalSessionPolicyRule] = []
|
||||
@@ -59,6 +59,7 @@ dependencies = [
|
||||
"microsoft-kiota-abstractions==1.9.2",
|
||||
"msgraph-sdk==1.55.0",
|
||||
"numpy==2.0.2",
|
||||
"okta==3.4.2",
|
||||
"openstacksdk==4.2.0",
|
||||
"pandas==2.2.3",
|
||||
"py-ocsf-models==0.8.1",
|
||||
|
||||
@@ -488,3 +488,8 @@ m365:
|
||||
# Exchange Mailbox Settings
|
||||
# m365.exchange_mailbox_properties_auditing_enabled
|
||||
audit_log_age: 90 # maximum number of days to keep audit logs
|
||||
|
||||
okta:
|
||||
# Okta Sign-On Policies
|
||||
# okta.signon_global_session_idle_timeout_15min
|
||||
okta_max_session_idle_minutes: 15
|
||||
|
||||
@@ -17,7 +17,7 @@ prowler_command = "prowler"
|
||||
|
||||
# capsys
|
||||
# https://docs.pytest.org/en/7.1.x/how-to/capture-stdout-stderr.html
|
||||
prowler_default_usage_error = "usage: prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel,dashboard,iac,image,llm} ..."
|
||||
prowler_default_usage_error = "usage: prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel,dashboard,iac,image,llm} ..."
|
||||
|
||||
|
||||
def mock_get_available_providers():
|
||||
|
||||
@@ -408,3 +408,83 @@ class Test_iam_no_custom_policy_permissive_role_assumption:
|
||||
assert search(
|
||||
"allows permissive STS Role assumption", result[0].status_extended
|
||||
)
|
||||
|
||||
    @mock_aws
    def test_unattached_policy_skipped_when_scan_unused_services_disabled(self):
        """An unattached permissive policy yields no findings when --scan-unused-services is off."""
        iam_client = client("iam")
        policy_name = "unattached_permissive_assume_role"
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "sts:AssumeRole", "Resource": "*"},
            ],
        }
        # Create the policy but do NOT attach it to any principal.
        iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document)
        )

        from prowler.providers.aws.services.iam.iam_service import IAM

        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.iam.iam_no_custom_policy_permissive_role_assumption.iam_no_custom_policy_permissive_role_assumption.iam_client",
                new=IAM(aws_provider),
            ):
                from prowler.providers.aws.services.iam.iam_no_custom_policy_permissive_role_assumption.iam_no_custom_policy_permissive_role_assumption import (
                    iam_no_custom_policy_permissive_role_assumption,
                )

                check = iam_no_custom_policy_permissive_role_assumption()
                result = check.execute()
                # Unattached policies are skipped entirely, not reported as PASS.
                assert result == []
|
||||
|
||||
    @mock_aws
    def test_attached_policy_fails_when_scan_unused_services_disabled(self):
        """An attached permissive policy is still reported with --scan-unused-services off."""
        iam_client = client("iam")
        user_name = "test_user_assume_role"
        policy_name = "attached_permissive_assume_role"
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "sts:AssumeRole", "Resource": "*"},
            ],
        }
        arn = iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document)
        )["Policy"]["Arn"]
        iam_client.create_user(UserName=user_name)
        # Attaching the policy makes it "in use", so it must be scanned.
        iam_client.attach_user_policy(UserName=user_name, PolicyArn=arn)

        from prowler.providers.aws.services.iam.iam_service import IAM

        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.iam.iam_no_custom_policy_permissive_role_assumption.iam_no_custom_policy_permissive_role_assumption.iam_client",
                new=IAM(aws_provider),
            ):
                from prowler.providers.aws.services.iam.iam_no_custom_policy_permissive_role_assumption.iam_no_custom_policy_permissive_role_assumption import (
                    iam_no_custom_policy_permissive_role_assumption,
                )

                check = iam_no_custom_policy_permissive_role_assumption()
                result = check.execute()
                assert len(result) == 1
                assert result[0].status == "FAIL"
                assert result[0].resource_arn == arn
                assert search(
                    "allows permissive STS Role assumption", result[0].status_extended
                )
|
||||
|
||||
@@ -1261,3 +1261,86 @@ class Test_iam_policy_allows_privilege_escalation:
|
||||
permissions
|
||||
]:
|
||||
assert search(permission, finding.status_extended)
|
||||
|
||||
    @mock_aws
    def test_unattached_policy_skipped_when_scan_unused_services_disabled(self):
        """An unattached privilege-escalation policy yields no findings with --scan-unused-services off."""
        iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
        policy_name = "unattached_privilege_escalation"
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "iam:CreateAccessKey", "Resource": "*"},
            ],
        }
        # Create the policy but do NOT attach it to any principal.
        iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document)
        )

        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )
        from prowler.providers.aws.services.iam.iam_service import IAM

        with (
            mock.patch(
                "prowler.providers.common.provider.Provider.get_global_provider",
                return_value=aws_provider,
            ),
            mock.patch(
                "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
                new=IAM(aws_provider),
            ),
        ):
            from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
                iam_policy_allows_privilege_escalation,
            )

            check = iam_policy_allows_privilege_escalation()
            result = check.execute()
            # Unattached policies are skipped entirely, not reported as PASS.
            assert result == []
|
||||
|
||||
    @mock_aws
    def test_attached_policy_fails_when_scan_unused_services_disabled(self):
        """An attached privilege-escalation policy still FAILs with --scan-unused-services off."""
        iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
        user_name = "test_user_privesc"
        policy_name = "attached_privilege_escalation"
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "iam:CreateAccessKey", "Resource": "*"},
            ],
        }
        policy_arn = iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document)
        )["Policy"]["Arn"]
        iam_client.create_user(UserName=user_name)
        # Attaching the policy makes it "in use", so it must be scanned.
        iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)

        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )
        from prowler.providers.aws.services.iam.iam_service import IAM

        with (
            mock.patch(
                "prowler.providers.common.provider.Provider.get_global_provider",
                return_value=aws_provider,
            ),
            mock.patch(
                "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
                new=IAM(aws_provider),
            ),
        ):
            from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
                iam_policy_allows_privilege_escalation,
            )

            check = iam_policy_allows_privilege_escalation()
            result = check.execute()
            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert result[0].resource_arn == policy_arn
            assert search(
                f"Custom Policy {policy_arn} allows privilege escalation",
                result[0].status_extended,
            )
|
||||
|
||||
@@ -207,3 +207,78 @@ class Test_iam_policy_no_full_access_to_cloudtrail:
|
||||
assert result[0].resource_id == "policy_no_cloudtrail_full_no_actions"
|
||||
assert result[0].resource_arn == arn
|
||||
assert result[0].region == "us-east-1"
|
||||
|
||||
    @mock_aws
    def test_unattached_policy_skipped_when_scan_unused_services_disabled(self):
        """An unattached cloudtrail:* policy yields no findings with --scan-unused-services off."""
        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )
        iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
        policy_name = "unattached_cloudtrail_full"
        policy_document_full_access = {
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "cloudtrail:*", "Resource": "*"},
            ],
        }
        # Create the policy but do NOT attach it to any principal.
        iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document_full_access)
        )

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.iam.iam_policy_no_full_access_to_cloudtrail.iam_policy_no_full_access_to_cloudtrail.iam_client",
                new=IAM(aws_provider),
            ):
                from prowler.providers.aws.services.iam.iam_policy_no_full_access_to_cloudtrail.iam_policy_no_full_access_to_cloudtrail import (
                    iam_policy_no_full_access_to_cloudtrail,
                )

                check = iam_policy_no_full_access_to_cloudtrail()
                result = check.execute()
                # Unattached policies are skipped entirely, not reported as PASS.
                assert result == []
|
||||
|
||||
    @mock_aws
    def test_attached_policy_fails_when_scan_unused_services_disabled(self):
        """An attached cloudtrail:* policy still FAILs with --scan-unused-services off."""
        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )
        iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
        user_name = "test_user_cloudtrail"
        policy_name = "attached_cloudtrail_full"
        policy_document_full_access = {
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "cloudtrail:*", "Resource": "*"},
            ],
        }
        arn = iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document_full_access)
        )["Policy"]["Arn"]
        iam_client.create_user(UserName=user_name)
        # Attaching the policy makes it "in use", so it must be scanned.
        iam_client.attach_user_policy(UserName=user_name, PolicyArn=arn)

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.iam.iam_policy_no_full_access_to_cloudtrail.iam_policy_no_full_access_to_cloudtrail.iam_client",
                new=IAM(aws_provider),
            ):
                from prowler.providers.aws.services.iam.iam_policy_no_full_access_to_cloudtrail.iam_policy_no_full_access_to_cloudtrail import (
                    iam_policy_no_full_access_to_cloudtrail,
                )

                check = iam_policy_no_full_access_to_cloudtrail()
                result = check.execute()
                assert len(result) == 1
                assert result[0].status == "FAIL"
                assert (
                    result[0].status_extended
                    == f"Custom Policy {policy_name} allows 'cloudtrail:*' privileges."
                )
                assert result[0].resource_arn == arn
|
||||
|
||||
@@ -329,6 +329,81 @@ class Test_iam_policy_no_full_access_to_kms_with_unicode:
|
||||
assert result[0].resource_arn == arn
|
||||
assert result[0].region == "us-east-1"
|
||||
|
||||
    @mock_aws
    def test_unattached_policy_skipped_when_scan_unused_services_disabled(self):
        """An unattached kms:* policy yields no findings with --scan-unused-services off."""
        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )
        iam_client = client("iam")
        policy_name = "unattached_kms_full"
        policy_document_full_access = {
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "kms:*", "Resource": "*"},
            ],
        }
        # Create the policy but do NOT attach it to any principal.
        iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document_full_access)
        )

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.iam.iam_policy_no_full_access_to_kms.iam_policy_no_full_access_to_kms.iam_client",
                new=IAM(aws_provider),
            ):
                from prowler.providers.aws.services.iam.iam_policy_no_full_access_to_kms.iam_policy_no_full_access_to_kms import (
                    iam_policy_no_full_access_to_kms,
                )

                check = iam_policy_no_full_access_to_kms()
                result = check.execute()
                # Unattached policies are skipped entirely, not reported as PASS.
                assert result == []
|
||||
|
||||
    @mock_aws
    def test_attached_policy_fails_when_scan_unused_services_disabled(self):
        """An attached kms:* policy still FAILs with --scan-unused-services off."""
        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )
        iam_client = client("iam")
        user_name = "test_user_kms"
        policy_name = "attached_kms_full"
        policy_document_full_access = {
            "Version": "2012-10-17",
            "Statement": [
                {"Effect": "Allow", "Action": "kms:*", "Resource": "*"},
            ],
        }
        arn = iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document_full_access)
        )["Policy"]["Arn"]
        iam_client.create_user(UserName=user_name)
        # Attaching the policy makes it "in use", so it must be scanned.
        iam_client.attach_user_policy(UserName=user_name, PolicyArn=arn)

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                "prowler.providers.aws.services.iam.iam_policy_no_full_access_to_kms.iam_policy_no_full_access_to_kms.iam_client",
                new=IAM(aws_provider),
            ):
                from prowler.providers.aws.services.iam.iam_policy_no_full_access_to_kms.iam_policy_no_full_access_to_kms import (
                    iam_policy_no_full_access_to_kms,
                )

                check = iam_policy_no_full_access_to_kms()
                result = check.execute()
                assert len(result) == 1
                assert result[0].status == "FAIL"
                assert (
                    result[0].status_extended
                    == f"Custom Policy {policy_name} allows 'kms:*' privileges."
                )
                assert result[0].resource_arn == arn
|
||||
|
||||
@mock_aws
|
||||
def test_policy_full_access_and_full_deny_to_kms(self):
|
||||
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
|
||||
|
||||
@@ -507,3 +507,84 @@ class Test_iam_policy_no_wildcard_marketplace_subscribe:
|
||||
check = iam_policy_no_wildcard_marketplace_subscribe()
|
||||
result = check.execute()
|
||||
assert len(result) == 0
|
||||
|
||||
    @mock_aws
    def test_unattached_policy_skipped_when_scan_unused_services_disabled(self):
        """No FAIL for an unattached risky policy when --scan-unused-services is off."""
        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )
        iam_client = client("iam")
        policy_name = "unattached_marketplace_subscribe"
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": "aws-marketplace:Subscribe",
                    "Resource": "*",
                },
            ],
        }
        # Created but never attached to a user/group/role.
        iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document)
        )

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                f"{CHECK_MODULE_PATH}.iam_client",
                new=IAM(aws_provider),
            ):
                from prowler.providers.aws.services.iam.iam_policy_no_wildcard_marketplace_subscribe.iam_policy_no_wildcard_marketplace_subscribe import (
                    iam_policy_no_wildcard_marketplace_subscribe,
                )

                check = iam_policy_no_wildcard_marketplace_subscribe()
                result = check.execute()
                # Unattached policies are skipped entirely, not reported as PASS.
                assert result == []
|
||||
|
||||
    @mock_aws
    def test_attached_policy_fails_when_scan_unused_services_disabled(self):
        """Attached risky policy still FAILs when --scan-unused-services is off."""
        aws_provider = set_mocked_aws_provider(
            [AWS_REGION_US_EAST_1], scan_unused_services=False
        )
        iam_client = client("iam")
        user_name = "test_user_marketplace"
        policy_name = "attached_marketplace_subscribe"
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": "aws-marketplace:Subscribe",
                    "Resource": "*",
                },
            ],
        }
        arn = iam_client.create_policy(
            PolicyName=policy_name, PolicyDocument=dumps(policy_document)
        )["Policy"]["Arn"]
        iam_client.create_user(UserName=user_name)
        # Attaching the policy makes it "in use", so it must be scanned.
        iam_client.attach_user_policy(UserName=user_name, PolicyArn=arn)

        with mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=aws_provider,
        ):
            with mock.patch(
                f"{CHECK_MODULE_PATH}.iam_client",
                new=IAM(aws_provider),
            ):
                from prowler.providers.aws.services.iam.iam_policy_no_wildcard_marketplace_subscribe.iam_policy_no_wildcard_marketplace_subscribe import (
                    iam_policy_no_wildcard_marketplace_subscribe,
                )

                check = iam_policy_no_wildcard_marketplace_subscribe()
                result = check.execute()
                assert len(result) == 1
                assert result[0].status == "FAIL"
                assert result[0].resource_arn == arn
|
||||
|
||||
@@ -0,0 +1,76 @@
|
||||
import pytest
|
||||
|
||||
from prowler.providers.okta.exceptions.exceptions import (
|
||||
OktaBaseException,
|
||||
OktaCredentialsError,
|
||||
OktaEnvironmentVariableError,
|
||||
OktaInsufficientPermissionsError,
|
||||
OktaInvalidCredentialsError,
|
||||
OktaInvalidOrgDomainError,
|
||||
OktaPrivateKeyFileError,
|
||||
OktaSetUpIdentityError,
|
||||
OktaSetUpSessionError,
|
||||
)
|
||||
|
||||
# Expected reserved error code for every public Okta credentials exception.
# The Okta provider owns the 14000-14999 range (see OKTA_ERROR_CODES).
EXPECTED_CODES = {
    OktaEnvironmentVariableError: 14000,
    OktaSetUpSessionError: 14001,
    OktaSetUpIdentityError: 14002,
    OktaInvalidCredentialsError: 14003,
    OktaInvalidOrgDomainError: 14004,
    OktaPrivateKeyFileError: 14005,
    OktaInsufficientPermissionsError: 14006,
}
|
||||
|
||||
|
||||
class Test_OktaExceptions:
    """Contract tests for the Okta provider's exception hierarchy and codes."""

    def test_all_codes_in_reserved_range(self):
        # OKTA_ERROR_CODES keys are (code, ...) tuples — the first element
        # is the numeric code; NOTE(review): confirm the second element's
        # meaning against the exceptions module.
        codes = [c for c, _ in OktaBaseException.OKTA_ERROR_CODES.keys()]
        assert all(14000 <= c <= 14999 for c in codes)
        assert len(codes) == len(set(codes))  # unique

    def test_all_subclasses_inherit_from_credentials_error(self):
        # Every concrete Okta exception must be catchable via both bases.
        for exc_cls in EXPECTED_CODES:
            assert issubclass(exc_cls, OktaCredentialsError)
            assert issubclass(exc_cls, OktaBaseException)

    @pytest.mark.parametrize("exc_cls,code", list(EXPECTED_CODES.items()))
    def test_each_exception_carries_its_code(self, exc_cls, code):
        exc = exc_cls()
        assert exc.code == code
        assert exc.source == "Okta"
        assert exc.message  # populated from OKTA_ERROR_CODES
        assert exc.remediation  # populated from OKTA_ERROR_CODES

    @pytest.mark.parametrize("exc_cls", list(EXPECTED_CODES.keys()))
    def test_custom_message_overrides_default(self, exc_cls):
        # A caller-provided message must replace the table default.
        custom = "specific error context"
        exc = exc_cls(message=custom)
        assert exc.message == custom

    def test_str_format_includes_class_code_and_message(self):
        exc = OktaInvalidOrgDomainError(message="bad url")
        rendered = str(exc)
        assert "OktaInvalidOrgDomainError" in rendered
        assert "[14004]" in rendered
        assert "bad url" in rendered

    def test_original_exception_appended_to_str(self):
        # The wrapped original error's text must survive into str(exc).
        original = ValueError("network down")
        exc = OktaSetUpIdentityError(original_exception=original)
        rendered = str(exc)
        assert "network down" in rendered

    def test_can_be_raised_and_caught(self):
        with pytest.raises(OktaInvalidCredentialsError) as info:
            raise OktaInvalidCredentialsError(message="bad token")
        assert info.value.code == 14003
        assert "bad token" in str(info.value)

    def test_caught_as_credentials_error_base(self):
        with pytest.raises(OktaCredentialsError):
            raise OktaPrivateKeyFileError(message="empty")

    def test_caught_as_okta_base_exception(self):
        with pytest.raises(OktaBaseException):
            raise OktaEnvironmentVariableError(message="missing org url")
|
||||
@@ -0,0 +1,62 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from prowler.providers.okta.lib.arguments import arguments
|
||||
|
||||
|
||||
class TestOktaArguments:
    """Unit tests for the Okta CLI argument parser registration."""

    def setup_method(self):
        # Wire a fake argparse hierarchy: parser -> subparsers -> okta parser.
        self.mock_parser = MagicMock()
        self.mock_subparsers = MagicMock()
        self.mock_okta_parser = MagicMock()
        self.mock_parser.add_subparsers.return_value = self.mock_subparsers
        self.mock_subparsers.add_parser.return_value = self.mock_okta_parser

    def _build_args(self):
        # Mimic the object Prowler hands to init_parser().
        args = MagicMock()
        args.subparsers = self.mock_subparsers
        args.common_providers_parser = MagicMock()
        return args

    @staticmethod
    def _registered_flags(group):
        # Collect every flag name passed to group.add_argument(...).
        return {call.args[0] for call in group.add_argument.call_args_list}

    def test_init_parser_creates_subparser(self):
        args = self._build_args()

        arguments.init_parser(args)

        self.mock_subparsers.add_parser.assert_called_once_with(
            "okta",
            parents=[args.common_providers_parser],
            help="Okta Provider",
        )

    def test_init_parser_registers_non_secret_flags(self):
        args = self._build_args()
        auth_group = MagicMock()
        self.mock_okta_parser.add_argument_group.return_value = auth_group

        arguments.init_parser(args)

        assert self._registered_flags(auth_group) == {
            "--okta-org-domain",
            "--okta-client-id",
            "--okta-scopes",
        }

    def test_secret_flags_not_registered(self):
        """Private key material must never be a CLI flag — env-only."""
        args = self._build_args()
        auth_group = MagicMock()
        self.mock_okta_parser.add_argument_group.return_value = auth_group

        arguments.init_parser(args)

        flags = self._registered_flags(auth_group)
        assert "--okta-private-key" not in flags
        assert "--okta-private-key-file" not in flags

    def test_no_sensitive_arguments_constant(self):
        """No SENSITIVE_ARGUMENTS frozenset needed — no secret flags exist."""
        assert not hasattr(arguments, "SENSITIVE_ARGUMENTS")
|
||||
@@ -0,0 +1,9 @@
|
||||
Mutelist:
|
||||
Accounts:
|
||||
"acme.okta.com":
|
||||
Checks:
|
||||
"signon_global_session_idle_timeout_15min":
|
||||
Regions:
|
||||
- "*"
|
||||
Resources:
|
||||
- "pol-default"
|
||||
@@ -0,0 +1,104 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import yaml
|
||||
|
||||
from prowler.providers.okta.lib.mutelist.mutelist import OktaMutelist
|
||||
|
||||
MUTELIST_FIXTURE_PATH = "tests/providers/okta/lib/mutelist/fixtures/okta_mutelist.yaml"
|
||||
|
||||
|
||||
class TestOktaMutelist:
|
||||
def test_get_mutelist_file_from_local_file(self):
|
||||
mutelist = OktaMutelist(mutelist_path=MUTELIST_FIXTURE_PATH)
|
||||
|
||||
with open(MUTELIST_FIXTURE_PATH) as f:
|
||||
mutelist_fixture = yaml.safe_load(f)["Mutelist"]
|
||||
|
||||
assert mutelist.mutelist == mutelist_fixture
|
||||
assert mutelist.mutelist_file_path == MUTELIST_FIXTURE_PATH
|
||||
|
||||
def test_get_mutelist_file_from_local_file_non_existent(self):
|
||||
mutelist_path = "tests/providers/okta/lib/mutelist/fixtures/not_present"
|
||||
mutelist = OktaMutelist(mutelist_path=mutelist_path)
|
||||
|
||||
assert mutelist.mutelist == {}
|
||||
assert mutelist.mutelist_file_path == mutelist_path
|
||||
|
||||
def test_validate_mutelist_not_valid_key(self):
|
||||
with open(MUTELIST_FIXTURE_PATH) as f:
|
||||
mutelist_fixture = yaml.safe_load(f)["Mutelist"]
|
||||
|
||||
mutelist_fixture["Accounts1"] = mutelist_fixture["Accounts"]
|
||||
del mutelist_fixture["Accounts"]
|
||||
|
||||
mutelist = OktaMutelist(mutelist_content=mutelist_fixture)
|
||||
|
||||
assert len(mutelist.validate_mutelist(mutelist_fixture)) == 0
|
||||
assert mutelist.mutelist == {}
|
||||
assert mutelist.mutelist_file_path is None
|
||||
|
||||
def test_is_finding_muted_match(self):
|
||||
mutelist_content = {
|
||||
"Accounts": {
|
||||
"acme.okta.com": {
|
||||
"Checks": {
|
||||
"signon_global_session_idle_timeout_15min": {
|
||||
"Regions": ["*"],
|
||||
"Resources": ["Default Policy"],
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
mutelist = OktaMutelist(mutelist_content=mutelist_content)
|
||||
|
||||
finding = MagicMock()
|
||||
finding.check_metadata.CheckID = "signon_global_session_idle_timeout_15min"
|
||||
finding.resource_name = "Default Policy"
|
||||
finding.resource_tags = []
|
||||
|
||||
assert mutelist.is_finding_muted(finding, org_domain="acme.okta.com") is True
|
||||
|
||||
def test_is_finding_muted_no_match(self):
|
||||
mutelist_content = {
|
||||
"Accounts": {
|
||||
"acme.okta.com": {
|
||||
"Checks": {
|
||||
"signon_global_session_idle_timeout_15min": {
|
||||
"Regions": ["*"],
|
||||
"Resources": ["Default Policy"],
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
mutelist = OktaMutelist(mutelist_content=mutelist_content)
|
||||
|
||||
finding = MagicMock()
|
||||
finding.check_metadata.CheckID = "signon_global_session_idle_timeout_15min"
|
||||
finding.resource_name = "Some Other Policy"
|
||||
finding.resource_tags = []
|
||||
|
||||
assert mutelist.is_finding_muted(finding, org_domain="acme.okta.com") is False
|
||||
|
||||
def test_is_finding_muted_no_match_on_different_org(self):
|
||||
mutelist_content = {
|
||||
"Accounts": {
|
||||
"acme.okta.com": {
|
||||
"Checks": {
|
||||
"signon_global_session_idle_timeout_15min": {
|
||||
"Regions": ["*"],
|
||||
"Resources": ["*"],
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
mutelist = OktaMutelist(mutelist_content=mutelist_content)
|
||||
|
||||
finding = MagicMock()
|
||||
finding.check_metadata.CheckID = "signon_global_session_idle_timeout_15min"
|
||||
finding.resource_name = "Default Policy"
|
||||
finding.resource_tags = []
|
||||
|
||||
assert mutelist.is_finding_muted(finding, org_domain="other.okta.com") is False
|
||||
@@ -0,0 +1,34 @@
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from prowler.providers.okta.models import OktaIdentityInfo, OktaSession
|
||||
|
||||
OKTA_ORG_DOMAIN = "acme.okta.com"
|
||||
OKTA_CLIENT_ID = "0oa1234567890abcdef"
|
||||
OKTA_PRIVATE_KEY = "-----BEGIN PRIVATE KEY-----\nMOCK\n-----END PRIVATE KEY-----"
|
||||
|
||||
|
||||
def set_mocked_okta_provider(
|
||||
session: OktaSession = None,
|
||||
identity: OktaIdentityInfo = None,
|
||||
audit_config: dict = None,
|
||||
):
|
||||
if session is None:
|
||||
session = OktaSession(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
scopes=["okta.policies.read"],
|
||||
private_key=OKTA_PRIVATE_KEY,
|
||||
)
|
||||
if identity is None:
|
||||
identity = OktaIdentityInfo(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
)
|
||||
|
||||
provider = MagicMock()
|
||||
provider.type = "okta"
|
||||
provider.auth_method = "OAuth 2.0 (private-key JWT)"
|
||||
provider.session = session
|
||||
provider.identity = identity
|
||||
provider.audit_config = audit_config or {}
|
||||
return provider
|
||||
@@ -0,0 +1,422 @@
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
|
||||
from prowler.providers.okta.exceptions.exceptions import (
|
||||
OktaEnvironmentVariableError,
|
||||
OktaInsufficientPermissionsError,
|
||||
OktaInvalidCredentialsError,
|
||||
OktaInvalidOrgDomainError,
|
||||
OktaPrivateKeyFileError,
|
||||
OktaSetUpIdentityError,
|
||||
)
|
||||
from prowler.providers.okta.models import OktaIdentityInfo, OktaSession
|
||||
from prowler.providers.okta.okta_provider import DEFAULT_SCOPES, OktaProvider
|
||||
from tests.providers.okta.okta_fixtures import (
|
||||
OKTA_CLIENT_ID,
|
||||
OKTA_ORG_DOMAIN,
|
||||
OKTA_PRIVATE_KEY,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def _clear_okta_env(monkeypatch):
|
||||
for var in (
|
||||
"OKTA_ORG_DOMAIN",
|
||||
"OKTA_CLIENT_ID",
|
||||
"OKTA_PRIVATE_KEY",
|
||||
"OKTA_PRIVATE_KEY_FILE",
|
||||
"OKTA_SCOPES",
|
||||
):
|
||||
monkeypatch.delenv(var, raising=False)
|
||||
|
||||
|
||||
class Test_OktaProvider_validate_arguments:
|
||||
def test_missing_all_three_raises_combined(self, _clear_okta_env):
|
||||
with pytest.raises(OktaEnvironmentVariableError) as exc:
|
||||
OktaProvider.validate_arguments()
|
||||
msg = str(exc.value)
|
||||
assert "OKTA_ORG_DOMAIN" in msg
|
||||
assert "OKTA_CLIENT_ID" in msg
|
||||
assert "OKTA_PRIVATE_KEY" in msg
|
||||
|
||||
def test_only_org_domain_missing(self, _clear_okta_env, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
with pytest.raises(OktaEnvironmentVariableError) as exc:
|
||||
OktaProvider.validate_arguments(
|
||||
okta_client_id=OKTA_CLIENT_ID,
|
||||
okta_private_key_file=str(key_file),
|
||||
)
|
||||
assert "OKTA_ORG_DOMAIN" in str(exc.value)
|
||||
|
||||
def test_accepts_private_key_content_in_place_of_file(self, _clear_okta_env):
|
||||
OktaProvider.validate_arguments(
|
||||
okta_org_domain=OKTA_ORG_DOMAIN,
|
||||
okta_client_id=OKTA_CLIENT_ID,
|
||||
okta_private_key=OKTA_PRIVATE_KEY,
|
||||
)
|
||||
|
||||
def test_all_present_via_args(self, _clear_okta_env, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
OktaProvider.validate_arguments(
|
||||
okta_org_domain=OKTA_ORG_DOMAIN,
|
||||
okta_client_id=OKTA_CLIENT_ID,
|
||||
okta_private_key_file=str(key_file),
|
||||
)
|
||||
|
||||
def test_all_present_via_env(self, monkeypatch, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
monkeypatch.setenv("OKTA_ORG_DOMAIN", OKTA_ORG_DOMAIN)
|
||||
monkeypatch.setenv("OKTA_CLIENT_ID", OKTA_CLIENT_ID)
|
||||
monkeypatch.setenv("OKTA_PRIVATE_KEY_FILE", str(key_file))
|
||||
OktaProvider.validate_arguments()
|
||||
|
||||
|
||||
class Test_OktaProvider_setup_session:
|
||||
def test_rejects_domain_with_scheme(self, _clear_okta_env, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
with pytest.raises(OktaInvalidOrgDomainError):
|
||||
OktaProvider.setup_session(
|
||||
org_domain="https://acme.okta.com",
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
)
|
||||
|
||||
def test_rejects_domain_with_trailing_slash(self, _clear_okta_env, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
with pytest.raises(OktaInvalidOrgDomainError):
|
||||
OktaProvider.setup_session(
|
||||
org_domain="acme.okta.com/",
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
)
|
||||
|
||||
def test_rejects_non_okta_tld(self, _clear_okta_env, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
with pytest.raises(OktaInvalidOrgDomainError):
|
||||
OktaProvider.setup_session(
|
||||
org_domain="login.example.com",
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
)
|
||||
|
||||
def test_accepts_all_okta_managed_tlds(self, _clear_okta_env, tmp_path):
|
||||
# Mirrors the domain whitelist used by the Okta SDK
|
||||
# (okta.config.config_validator) so that gov/mil tenants — exactly the
|
||||
# audience most likely to care about the DISA STIG check — are not
|
||||
# turned away at provider init.
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
for domain in (
|
||||
"acme.oktapreview.com",
|
||||
"acme.okta-emea.com",
|
||||
"acme.okta-gov.com",
|
||||
"acme.okta.mil",
|
||||
"acme.okta-miltest.com",
|
||||
"acme.trex-govcloud.com",
|
||||
):
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=domain,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
)
|
||||
assert session.org_domain == domain
|
||||
|
||||
def test_unreadable_private_key_file_raises(self, _clear_okta_env):
|
||||
with pytest.raises(OktaPrivateKeyFileError):
|
||||
OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file="/nonexistent/path.pem",
|
||||
)
|
||||
|
||||
def test_happy_path_uses_default_scopes(self, _clear_okta_env, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
)
|
||||
assert session.org_domain == OKTA_ORG_DOMAIN
|
||||
assert session.client_id == OKTA_CLIENT_ID
|
||||
assert session.private_key == OKTA_PRIVATE_KEY
|
||||
assert session.scopes == DEFAULT_SCOPES
|
||||
|
||||
def test_custom_scopes_parsed_from_csv(self, _clear_okta_env, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
scopes="okta.policies.read, okta.apps.read ,okta.users.read",
|
||||
)
|
||||
assert session.scopes == [
|
||||
"okta.policies.read",
|
||||
"okta.apps.read",
|
||||
"okta.users.read",
|
||||
]
|
||||
|
||||
def test_custom_scopes_accepts_list_input(self, _clear_okta_env, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
scopes=["okta.policies.read", "okta.apps.read", "okta.users.read"],
|
||||
)
|
||||
assert session.scopes == [
|
||||
"okta.policies.read",
|
||||
"okta.apps.read",
|
||||
"okta.users.read",
|
||||
]
|
||||
|
||||
def test_custom_scopes_flattens_mixed_list_and_csv(self, _clear_okta_env, tmp_path):
|
||||
# Mirrors how argparse nargs="+" delivers values when a user
|
||||
# passes "--okta-scopes a,b c" — a list whose first element still
|
||||
# contains a comma.
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
scopes=["okta.policies.read,okta.apps.read", "okta.users.read"],
|
||||
)
|
||||
assert session.scopes == [
|
||||
"okta.policies.read",
|
||||
"okta.apps.read",
|
||||
"okta.users.read",
|
||||
]
|
||||
|
||||
def test_org_domain_normalized_lowercase_and_trimmed(
|
||||
self, _clear_okta_env, tmp_path
|
||||
):
|
||||
# The provider lowercases and strips whitespace so that
|
||||
# " ACME.okta.com " is accepted as "acme.okta.com".
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=" ACME.okta.com ",
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
)
|
||||
assert session.org_domain == OKTA_ORG_DOMAIN
|
||||
|
||||
def test_accepts_private_key_via_content_arg(self, _clear_okta_env):
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key=OKTA_PRIVATE_KEY,
|
||||
)
|
||||
assert session.private_key == OKTA_PRIVATE_KEY
|
||||
|
||||
def test_accepts_private_key_via_env_var(self, monkeypatch):
|
||||
monkeypatch.setenv("OKTA_PRIVATE_KEY", OKTA_PRIVATE_KEY)
|
||||
monkeypatch.delenv("OKTA_PRIVATE_KEY_FILE", raising=False)
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
)
|
||||
assert session.private_key == OKTA_PRIVATE_KEY
|
||||
|
||||
def test_content_takes_precedence_over_file(self, _clear_okta_env, tmp_path):
|
||||
# File has stale content; explicit content arg should win.
|
||||
key_file = tmp_path / "stale.pem"
|
||||
key_file.write_text("STALE CONTENT FROM FILE")
|
||||
fresh_key = "-----BEGIN PRIVATE KEY-----\nFRESH\n-----END PRIVATE KEY-----"
|
||||
session = OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key=fresh_key,
|
||||
private_key_file=str(key_file),
|
||||
)
|
||||
assert session.private_key == fresh_key
|
||||
|
||||
|
||||
class Test_OktaProvider_setup_identity:
|
||||
def _session(self, tmp_path):
|
||||
key_file = tmp_path / "key.pem"
|
||||
key_file.write_text(OKTA_PRIVATE_KEY)
|
||||
return OktaProvider.setup_session(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
private_key_file=str(key_file),
|
||||
)
|
||||
|
||||
def test_synthesizes_identity_and_probes_successfully(
|
||||
self, _clear_okta_env, tmp_path
|
||||
):
|
||||
session = self._session(tmp_path)
|
||||
|
||||
async def fake_list_policies(*_a, **_k):
|
||||
return ([], mock.MagicMock(headers={}), None)
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.okta.okta_provider.OktaSDKClient"
|
||||
) as mocked_client_cls:
|
||||
mocked = mock.MagicMock()
|
||||
mocked.list_policies = fake_list_policies
|
||||
mocked_client_cls.return_value = mocked
|
||||
identity = OktaProvider.setup_identity(session)
|
||||
|
||||
assert identity.org_domain == OKTA_ORG_DOMAIN
|
||||
assert identity.client_id == OKTA_CLIENT_ID
|
||||
|
||||
def test_raises_invalid_credentials_when_probe_returns_error(
|
||||
self, _clear_okta_env, tmp_path
|
||||
):
|
||||
session = self._session(tmp_path)
|
||||
|
||||
async def failing_list_policies(*_a, **_k):
|
||||
return ([], None, Exception("E0000011: Invalid token"))
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.okta.okta_provider.OktaSDKClient"
|
||||
) as mocked_client_cls:
|
||||
mocked = mock.MagicMock()
|
||||
mocked.list_policies = failing_list_policies
|
||||
mocked_client_cls.return_value = mocked
|
||||
with pytest.raises(OktaInvalidCredentialsError):
|
||||
OktaProvider.setup_identity(session)
|
||||
|
||||
def test_raises_insufficient_permissions_on_scope_error(
|
||||
self, _clear_okta_env, tmp_path
|
||||
):
|
||||
session = self._session(tmp_path)
|
||||
|
||||
async def failing_list_policies(*_a, **_k):
|
||||
return ([], None, Exception("invalid_scope: policies.read missing"))
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.okta.okta_provider.OktaSDKClient"
|
||||
) as mocked_client_cls:
|
||||
mocked = mock.MagicMock()
|
||||
mocked.list_policies = failing_list_policies
|
||||
mocked_client_cls.return_value = mocked
|
||||
with pytest.raises(OktaInsufficientPermissionsError):
|
||||
OktaProvider.setup_identity(session)
|
||||
|
||||
def test_raises_insufficient_permissions_on_forbidden(
|
||||
self, _clear_okta_env, tmp_path
|
||||
):
|
||||
session = self._session(tmp_path)
|
||||
|
||||
async def failing_list_policies(*_a, **_k):
|
||||
return ([], None, Exception("403 Forbidden"))
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.okta.okta_provider.OktaSDKClient"
|
||||
) as mocked_client_cls:
|
||||
mocked = mock.MagicMock()
|
||||
mocked.list_policies = failing_list_policies
|
||||
mocked_client_cls.return_value = mocked
|
||||
with pytest.raises(OktaInsufficientPermissionsError):
|
||||
OktaProvider.setup_identity(session)
|
||||
|
||||
def test_wraps_unexpected_errors_in_setup_identity_error(
|
||||
self, _clear_okta_env, tmp_path
|
||||
):
|
||||
session = self._session(tmp_path)
|
||||
|
||||
async def boom(*_a, **_k):
|
||||
raise RuntimeError("network down")
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.okta.okta_provider.OktaSDKClient"
|
||||
) as mocked_client_cls:
|
||||
mocked = mock.MagicMock()
|
||||
mocked.list_policies = boom
|
||||
mocked_client_cls.return_value = mocked
|
||||
with pytest.raises(OktaSetUpIdentityError):
|
||||
OktaProvider.setup_identity(session)
|
||||
|
||||
|
||||
def _mock_setup_paths():
|
||||
"""Patches that bypass the real SDK during provider construction."""
|
||||
session = OktaSession(
|
||||
org_domain=OKTA_ORG_DOMAIN,
|
||||
client_id=OKTA_CLIENT_ID,
|
||||
scopes=list(DEFAULT_SCOPES),
|
||||
private_key=OKTA_PRIVATE_KEY,
|
||||
)
|
||||
identity = OktaIdentityInfo(org_domain=OKTA_ORG_DOMAIN, client_id=OKTA_CLIENT_ID)
|
||||
return (
|
||||
mock.patch.object(OktaProvider, "validate_arguments"),
|
||||
mock.patch.object(OktaProvider, "setup_session", return_value=session),
|
||||
mock.patch.object(OktaProvider, "setup_identity", return_value=identity),
|
||||
)
|
||||
|
||||
|
||||
class Test_OktaProvider_init:
|
||||
def test_init_end_to_end(self, _clear_okta_env, tmp_path):
|
||||
validate_p, session_p, identity_p = _mock_setup_paths()
|
||||
with validate_p, session_p, identity_p:
|
||||
provider = OktaProvider(
|
||||
okta_org_domain=OKTA_ORG_DOMAIN,
|
||||
okta_client_id=OKTA_CLIENT_ID,
|
||||
okta_private_key_file="/tmp/key.pem",
|
||||
)
|
||||
|
||||
assert provider.type == "okta"
|
||||
assert provider.auth_method == "OAuth 2.0 (private-key JWT)"
|
||||
assert provider.identity.org_domain == OKTA_ORG_DOMAIN
|
||||
assert provider.identity.client_id == OKTA_CLIENT_ID
|
||||
assert provider.session.scopes == DEFAULT_SCOPES
|
||||
assert provider.audit_config is not None
|
||||
assert provider.mutelist is not None
|
||||
|
||||
|
||||
class Test_OktaProvider_test_connection:
|
||||
def test_success(self, _clear_okta_env, tmp_path):
|
||||
validate_p, session_p, identity_p = _mock_setup_paths()
|
||||
with validate_p, session_p, identity_p:
|
||||
connection = OktaProvider.test_connection(
|
||||
okta_org_domain=OKTA_ORG_DOMAIN,
|
||||
okta_client_id=OKTA_CLIENT_ID,
|
||||
okta_private_key_file="/tmp/key.pem",
|
||||
)
|
||||
assert connection.is_connected is True
|
||||
assert connection.error is None
|
||||
|
||||
def test_returns_error_when_raise_disabled(self, _clear_okta_env):
|
||||
connection = OktaProvider.test_connection(raise_on_exception=False)
|
||||
assert connection.is_connected is False
|
||||
assert connection.error is not None
|
||||
|
||||
def test_raises_when_raise_enabled(self, _clear_okta_env):
|
||||
with pytest.raises(OktaEnvironmentVariableError):
|
||||
OktaProvider.test_connection()
|
||||
|
||||
|
||||
class Test_OktaProvider_print_credentials:
|
||||
def test_invokes_print_boxes_with_org_and_client(self, _clear_okta_env, tmp_path):
|
||||
validate_p, session_p, identity_p = _mock_setup_paths()
|
||||
with (
|
||||
validate_p,
|
||||
session_p,
|
||||
identity_p,
|
||||
mock.patch(
|
||||
"prowler.providers.okta.okta_provider.print_boxes"
|
||||
) as mock_print,
|
||||
):
|
||||
provider = OktaProvider(
|
||||
okta_org_domain=OKTA_ORG_DOMAIN,
|
||||
okta_client_id=OKTA_CLIENT_ID,
|
||||
okta_private_key_file="/tmp/key.pem",
|
||||
)
|
||||
provider.print_credentials()
|
||||
|
||||
mock_print.assert_called_once()
|
||||
rendered = " ".join(mock_print.call_args.args[0])
|
||||
assert OKTA_ORG_DOMAIN in rendered
|
||||
assert OKTA_CLIENT_ID in rendered
|
||||
assert "OAuth 2.0" in rendered
|
||||
@@ -0,0 +1,304 @@
|
||||
from unittest import mock
|
||||
|
||||
from prowler.providers.okta.services.signon.signon_service import (
|
||||
GlobalSessionPolicy,
|
||||
GlobalSessionPolicyRule,
|
||||
)
|
||||
from tests.providers.okta.okta_fixtures import set_mocked_okta_provider
|
||||
|
||||
CHECK_PATH = (
|
||||
"prowler.providers.okta.services.signon."
|
||||
"signon_global_session_idle_timeout_15min."
|
||||
"signon_global_session_idle_timeout_15min.signon_client"
|
||||
)
|
||||
|
||||
|
||||
def _build_signon_client(policies, audit_config: dict = None):
|
||||
client = mock.MagicMock()
|
||||
client.global_session_policies = policies
|
||||
client.provider = set_mocked_okta_provider()
|
||||
client.audit_config = audit_config or {}
|
||||
return client
|
||||
|
||||
|
||||
def _default_policy(rules):
|
||||
return GlobalSessionPolicy(
|
||||
id="pol-default",
|
||||
name="Default Policy",
|
||||
priority=99,
|
||||
status="ACTIVE",
|
||||
is_default=True,
|
||||
rules=rules,
|
||||
)
|
||||
|
||||
|
||||
def _custom_policy(rules):
|
||||
return GlobalSessionPolicy(
|
||||
id="pol-custom",
|
||||
name="Admins Policy",
|
||||
priority=1,
|
||||
status="ACTIVE",
|
||||
is_default=False,
|
||||
rules=rules,
|
||||
)
|
||||
|
||||
|
||||
def _default_rule(idle_min=480, priority=2, status="ACTIVE"):
|
||||
return GlobalSessionPolicyRule(
|
||||
id="rule-default",
|
||||
name="Default Rule",
|
||||
priority=priority,
|
||||
status=status,
|
||||
is_default=True,
|
||||
max_session_idle_minutes=idle_min,
|
||||
)
|
||||
|
||||
|
||||
def _non_default_rule(name, idle_min, priority=1, status="ACTIVE"):
|
||||
return GlobalSessionPolicyRule(
|
||||
id=f"rule-{name.lower().replace(' ', '-')}",
|
||||
name=name,
|
||||
priority=priority,
|
||||
status=status,
|
||||
is_default=False,
|
||||
max_session_idle_minutes=idle_min,
|
||||
)
|
||||
|
||||
|
||||
class Test_signon_global_session_idle_timeout_15min:
|
||||
def test_no_policies(self):
|
||||
signon_client = _build_signon_client({})
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "FAIL"
|
||||
assert "was not found" in findings[0].status_extended
|
||||
|
||||
def test_pass_when_priority_one_non_default_rule_is_compliant(self):
|
||||
policy = _default_policy(
|
||||
[
|
||||
_non_default_rule("Strict 15min", 15, priority=1),
|
||||
_default_rule(priority=2),
|
||||
]
|
||||
)
|
||||
signon_client = _build_signon_client({"pol-default": policy})
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "PASS"
|
||||
assert "Strict 15min" in findings[0].status_extended
|
||||
assert "Priority 1 non-default rule" in findings[0].status_extended
|
||||
|
||||
def test_fail_when_only_default_rule(self):
|
||||
policy = _default_policy([_default_rule(priority=1)])
|
||||
signon_client = _build_signon_client({"pol-default": policy})
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "FAIL"
|
||||
assert "uses 'Default Rule' as its active Priority 1 rule" in (
|
||||
findings[0].status_extended
|
||||
)
|
||||
|
||||
def test_fail_when_priority_one_non_default_rule_has_null_idle(self):
|
||||
# Rules without a session block leave max_session_idle_minutes as
|
||||
# None. The check must treat those as non-compliant — they cannot
|
||||
# enforce any timeout.
|
||||
policy = _default_policy(
|
||||
[
|
||||
GlobalSessionPolicyRule(
|
||||
id="rule-no-session",
|
||||
name="No Session Block",
|
||||
priority=1,
|
||||
status="ACTIVE",
|
||||
is_default=False,
|
||||
max_session_idle_minutes=None,
|
||||
),
|
||||
_default_rule(priority=2),
|
||||
]
|
||||
)
|
||||
signon_client = _build_signon_client({"pol-default": policy})
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "FAIL"
|
||||
assert "No Session Block" in findings[0].status_extended
|
||||
assert "does not define" in findings[0].status_extended
|
||||
|
||||
def test_fail_when_priority_one_non_default_rule_exceeds_threshold(self):
|
||||
policy = _default_policy(
|
||||
[
|
||||
_non_default_rule("Loose 60min", 60, priority=1),
|
||||
_default_rule(priority=2),
|
||||
]
|
||||
)
|
||||
signon_client = _build_signon_client({"pol-default": policy})
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "FAIL"
|
||||
assert "Loose 60min" in findings[0].status_extended
|
||||
assert "exceeding the configured threshold" in findings[0].status_extended
|
||||
|
||||
def test_fail_when_compliant_non_default_rule_is_not_priority_one(self):
|
||||
policy = _default_policy(
|
||||
[
|
||||
_default_rule(priority=1),
|
||||
_non_default_rule("Strict 15min", 15, priority=2),
|
||||
]
|
||||
)
|
||||
signon_client = _build_signon_client({"pol-default": policy})
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "FAIL"
|
||||
assert "uses 'Default Rule' as its active Priority 1 rule" in (
|
||||
findings[0].status_extended
|
||||
)
|
||||
|
||||
def test_ignores_other_custom_policies(self):
|
||||
default_policy = _default_policy(
|
||||
[
|
||||
_non_default_rule("Strict 15min", 15, priority=1),
|
||||
_default_rule(priority=2),
|
||||
]
|
||||
)
|
||||
custom_policy = _custom_policy(
|
||||
[
|
||||
_non_default_rule("Loose Admin Rule", 60, priority=1),
|
||||
_default_rule(priority=2),
|
||||
]
|
||||
)
|
||||
signon_client = _build_signon_client(
|
||||
{"pol-custom": custom_policy, "pol-default": default_policy}
|
||||
)
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "PASS"
|
||||
assert findings[0].resource_name == "Default Policy"
|
||||
|
||||
def test_fail_when_default_policy_is_inactive(self):
|
||||
policy = GlobalSessionPolicy(
|
||||
id="pol-default",
|
||||
name="Default Policy",
|
||||
priority=99,
|
||||
status="INACTIVE",
|
||||
is_default=True,
|
||||
rules=[_non_default_rule("Strict 15min", 15, priority=1)],
|
||||
)
|
||||
signon_client = _build_signon_client({"pol-default": policy})
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "FAIL"
|
||||
assert "status 'INACTIVE'" in findings[0].status_extended
|
||||
|
||||
def test_threshold_overridden_via_audit_config(self):
|
||||
# 30-minute rule fails the STIG default of 15, but passes a relaxed
|
||||
# threshold of 60 minutes set in audit_config.
|
||||
policy = _default_policy(
|
||||
[
|
||||
_non_default_rule("Relaxed 30min", 30, priority=1),
|
||||
_default_rule(priority=2),
|
||||
]
|
||||
)
|
||||
signon_client = _build_signon_client(
|
||||
{"pol-default": policy},
|
||||
audit_config={"okta_max_session_idle_minutes": 60},
|
||||
)
|
||||
with (
|
||||
mock.patch(
|
||||
"prowler.providers.common.provider.Provider.get_global_provider",
|
||||
return_value=set_mocked_okta_provider(),
|
||||
),
|
||||
mock.patch(CHECK_PATH, new=signon_client),
|
||||
):
|
||||
from prowler.providers.okta.services.signon.signon_global_session_idle_timeout_15min.signon_global_session_idle_timeout_15min import (
|
||||
signon_global_session_idle_timeout_15min,
|
||||
)
|
||||
|
||||
findings = signon_global_session_idle_timeout_15min().execute()
|
||||
assert len(findings) == 1
|
||||
assert findings[0].status == "PASS"
|
||||
assert "threshold of 60 minutes" in findings[0].status_extended
|
||||
@@ -0,0 +1,163 @@
|
||||
from unittest import mock
|
||||
|
||||
from prowler.providers.okta.services.signon.signon_service import (
|
||||
GlobalSessionPolicy,
|
||||
GlobalSessionPolicyRule,
|
||||
Signon,
|
||||
_next_after_cursor,
|
||||
)
|
||||
from tests.providers.okta.okta_fixtures import set_mocked_okta_provider
|
||||
|
||||
|
||||
def _fake_policy(
|
||||
policy_id: str,
|
||||
name: str,
|
||||
system: bool = True,
|
||||
priority: int | None = 1,
|
||||
status: str = "ACTIVE",
|
||||
):
|
||||
p = mock.MagicMock()
|
||||
p.id = policy_id
|
||||
p.name = name
|
||||
p.priority = priority
|
||||
p.status = status
|
||||
p.system = system
|
||||
return p
|
||||
|
||||
|
||||
def _fake_rule(
|
||||
rule_id: str,
|
||||
name: str,
|
||||
*,
|
||||
system: bool = False,
|
||||
priority: int | None = 1,
|
||||
status: str = "ACTIVE",
|
||||
max_session_idle_minutes: int = None,
|
||||
):
|
||||
r = mock.MagicMock()
|
||||
r.id = rule_id
|
||||
r.name = name
|
||||
r.priority = priority
|
||||
r.status = status
|
||||
r.system = system
|
||||
r.actions.signon.session.max_session_idle_minutes = max_session_idle_minutes
|
||||
r.actions.signon.session.max_session_lifetime_minutes = None
|
||||
r.actions.signon.session.use_persistent_cookie = None
|
||||
r.conditions.network.include = []
|
||||
r.conditions.network.exclude = []
|
||||
return r
|
||||
|
||||
|
||||
def _resp(headers: dict = None):
|
||||
r = mock.MagicMock()
|
||||
r.headers = headers or {}
|
||||
return r
|
||||
|
||||
|
||||
class Test_next_after_cursor:
|
||||
def test_no_resp_returns_none(self):
|
||||
assert _next_after_cursor(None) is None
|
||||
|
||||
def test_no_link_header_returns_none(self):
|
||||
assert _next_after_cursor(_resp({})) is None
|
||||
|
||||
def test_extracts_after_param(self):
|
||||
link = (
|
||||
'<https://acme.okta.com/api/v1/policies?limit=20>; rel="self", '
|
||||
'<https://acme.okta.com/api/v1/policies?after=abc123&limit=20>; rel="next"'
|
||||
)
|
||||
assert _next_after_cursor(_resp({"link": link})) == "abc123"
|
||||
|
||||
def test_link_without_next_returns_none(self):
|
||||
link = '<https://acme.okta.com/api/v1/policies?limit=20>; rel="self"'
|
||||
assert _next_after_cursor(_resp({"link": link})) is None
|
||||
|
||||
|
||||
class Test_Signon_service:
|
||||
def test_fetches_policies_and_rules(self):
|
||||
provider = set_mocked_okta_provider()
|
||||
|
||||
policy = _fake_policy("pol-default", "Default Policy", system=True)
|
||||
rule_default = _fake_rule(
|
||||
"rule-default", "Default Rule", system=True, max_session_idle_minutes=480
|
||||
)
|
||||
rule_compliant = _fake_rule(
|
||||
"rule-15", "Strict 15min", system=False, max_session_idle_minutes=15
|
||||
)
|
||||
|
||||
async def fake_list_policies(*_a, **_k):
|
||||
return ([policy], _resp({}), None)
|
||||
|
||||
async def fake_list_rules(*_a, **_k):
|
||||
return ([rule_default, rule_compliant], _resp({}), None)
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.okta.lib.service.service.OktaSDKClient"
|
||||
) as mocked_client_cls:
|
||||
mocked = mock.MagicMock()
|
||||
mocked.list_policies = fake_list_policies
|
||||
mocked.list_policy_rules = fake_list_rules
|
||||
mocked_client_cls.return_value = mocked
|
||||
|
||||
service = Signon(provider)
|
||||
|
||||
assert "pol-default" in service.global_session_policies
|
||||
policy_obj = service.global_session_policies["pol-default"]
|
||||
assert isinstance(policy_obj, GlobalSessionPolicy)
|
||||
assert policy_obj.is_default is True
|
||||
assert policy_obj.priority == 1
|
||||
assert policy_obj.status == "ACTIVE"
|
||||
assert len(policy_obj.rules) == 2
|
||||
rules_by_name = {r.name: r for r in policy_obj.rules}
|
||||
assert isinstance(rules_by_name["Default Rule"], GlobalSessionPolicyRule)
|
||||
assert rules_by_name["Default Rule"].is_default is True
|
||||
assert rules_by_name["Default Rule"].priority == 1
|
||||
assert rules_by_name["Default Rule"].status == "ACTIVE"
|
||||
assert rules_by_name["Strict 15min"].is_default is False
|
||||
assert rules_by_name["Strict 15min"].max_session_idle_minutes == 15
|
||||
|
||||
def test_paginates_via_link_header(self):
|
||||
provider = set_mocked_okta_provider()
|
||||
|
||||
page1_policy = _fake_policy("pol-1", "Default Policy")
|
||||
page2_policy = _fake_policy("pol-2", "Custom Policy", system=False)
|
||||
next_link = '<https://acme.okta.com/api/v1/policies?after=cursor-2>; rel="next"'
|
||||
|
||||
calls = []
|
||||
|
||||
async def fake_list_policies(*_a, **kwargs):
|
||||
calls.append(kwargs.get("after"))
|
||||
if kwargs.get("after") is None:
|
||||
return ([page1_policy], _resp({"link": next_link}), None)
|
||||
return ([page2_policy], _resp({}), None)
|
||||
|
||||
async def fake_list_rules(*_a, **_k):
|
||||
return ([], _resp({}), None)
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.okta.lib.service.service.OktaSDKClient"
|
||||
) as mocked_client_cls:
|
||||
mocked = mock.MagicMock()
|
||||
mocked.list_policies = fake_list_policies
|
||||
mocked.list_policy_rules = fake_list_rules
|
||||
mocked_client_cls.return_value = mocked
|
||||
service = Signon(provider)
|
||||
|
||||
assert calls == [None, "cursor-2"]
|
||||
assert set(service.global_session_policies.keys()) == {"pol-1", "pol-2"}
|
||||
|
||||
def test_returns_empty_on_api_error(self):
|
||||
provider = set_mocked_okta_provider()
|
||||
|
||||
async def failing(*_a, **_k):
|
||||
return ([], _resp({}), Exception("E0000007: not found"))
|
||||
|
||||
with mock.patch(
|
||||
"prowler.providers.okta.lib.service.service.OktaSDKClient"
|
||||
) as mocked_client_cls:
|
||||
mocked = mock.MagicMock()
|
||||
mocked.list_policies = failing
|
||||
mocked_client_cls.return_value = mocked
|
||||
service = Signon(provider)
|
||||
|
||||
assert service.global_session_policies == {}
|
||||
@@ -226,5 +226,6 @@ pnpm run test:e2e:ui
|
||||
- [ ] Relevant E2E tests pass
|
||||
- [ ] All UI states handled (loading, error, empty)
|
||||
- [ ] No secrets in code (use `.env.local`)
|
||||
- [ ] New npm dependencies include package-health evidence (maintenance, popularity, known vulnerabilities, license, release age) and a rationale for not using existing/native alternatives.
|
||||
- [ ] Error messages sanitized
|
||||
- [ ] Server-side validation present
|
||||
|
||||
@@ -11,6 +11,7 @@ All notable changes to the **Prowler UI** are documented in this file.
|
||||
### 🔄 Changed
|
||||
|
||||
- Trimmed unused npm dependencies [(#11115)](https://github.com/prowler-cloud/prowler/pull/11115)
|
||||
- Lighthouse now accepts Prowler App Finding Groups MCP tools [(#11140)](https://github.com/prowler-cloud/prowler/pull/11140)
|
||||
- Attack Paths graph now uses React Flow with improved layout, interactions, export, minimap, and browser test coverage [(#10686)](https://github.com/prowler-cloud/prowler/pull/10686)
|
||||
- SAML ACS URL is only shown if the email domain is configured [(#11144)](https://github.com/prowler-cloud/prowler/pull/11144)
|
||||
|
||||
|
||||
@@ -81,7 +81,7 @@ describe("running a query", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
expect(graph.background).toBeTruthy();
|
||||
expect(graph.minimap).toBeTruthy();
|
||||
@@ -91,7 +91,7 @@ describe("running a query", () => {
|
||||
test("nodes are laid out at distinct positions", async ({ mountWith }) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
const positions = graph.nodePositions;
|
||||
expect(positions.some((p) => p.x !== 0 || p.y !== 0)).toBe(true);
|
||||
@@ -102,7 +102,7 @@ describe("running a query", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(1);
|
||||
await graph.waitForGraphStable(1);
|
||||
|
||||
expect(graph.toolbar.zoomInButton).toBeTruthy();
|
||||
expect(graph.toolbar.zoomOutButton).toBeTruthy();
|
||||
@@ -115,7 +115,7 @@ describe("running a query", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
|
||||
expect(graph.findingNodes.length).toBeGreaterThan(0);
|
||||
@@ -128,7 +128,7 @@ describe("running a query", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
|
||||
expect(graph.findingEdges.length).toBeGreaterThan(0);
|
||||
@@ -138,7 +138,7 @@ describe("running a query", () => {
|
||||
test("edges connect string source and target ids", async ({ mountWith }) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(2);
|
||||
await graph.waitForGraphStable(2, 1);
|
||||
|
||||
const edgeIds = graph.renderedEdgeIds;
|
||||
expect(edgeIds.length).toBeGreaterThan(0);
|
||||
@@ -153,7 +153,7 @@ describe("running a query", () => {
|
||||
}) => {
|
||||
const graph = await mountWith(fixtures.singleNode());
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(1);
|
||||
await graph.waitForGraphStable(1);
|
||||
expect(graph.nodes).toHaveLength(1);
|
||||
});
|
||||
|
||||
@@ -175,7 +175,7 @@ describe("running a query", () => {
|
||||
const graph = await mountWith(fixtures.large(200));
|
||||
const start = performance.now();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(1);
|
||||
await graph.waitForGraphStable(1);
|
||||
const elapsed = performance.now() - start;
|
||||
expect(elapsed).toBeLessThan(5000);
|
||||
});
|
||||
@@ -183,7 +183,7 @@ describe("running a query", () => {
|
||||
test("disconnected components are both visible", async ({ mountWith }) => {
|
||||
const graph = await mountWith(fixtures.disconnected());
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(4);
|
||||
await graph.waitForGraphStable(4);
|
||||
expect(graph.nodes.length).toBe(4);
|
||||
});
|
||||
|
||||
@@ -192,7 +192,7 @@ describe("running a query", () => {
|
||||
}) => {
|
||||
const graph = await mountWith(fixtures.resourcesOnly());
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
expect(graph.findingNodes.length).toBe(0);
|
||||
expect(graph.resourceNodes.length).toBe(3);
|
||||
});
|
||||
@@ -202,7 +202,7 @@ describe("running a query", () => {
|
||||
}) => {
|
||||
const graph = await mountWith(fixtures.findingsOnly());
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
expect(graph.findingNodes.length).toBe(3);
|
||||
expect(graph.resourceNodes.length).toBe(0);
|
||||
@@ -239,7 +239,7 @@ describe("running a query", () => {
|
||||
|
||||
const graph = await mountWith(fixture);
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
// Then - hidden findings do not influence initial resource coordinates.
|
||||
for (const node of visibleNodes) {
|
||||
@@ -254,7 +254,7 @@ describe("running a query", () => {
|
||||
}) => {
|
||||
const graph = await mountWith(fixtures.edgeCases());
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(5);
|
||||
await graph.waitForGraphStable(5);
|
||||
|
||||
expect(graph.nodes.length).toBe(7);
|
||||
expect(graph.containsText(/🔒-secure-bucket-日本語/)).toBe(true);
|
||||
@@ -267,7 +267,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
|
||||
expect(graph.isInFilteredView).toBe(false);
|
||||
@@ -284,7 +284,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
expect(graph.findingNodes.length).toBe(0);
|
||||
expect(graph.hasNodeDetailsModal).toBe(false);
|
||||
@@ -301,7 +301,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
await graph.clickFirstResourceNode();
|
||||
expect(graph.findingNodes.length).toBeGreaterThan(0);
|
||||
@@ -318,7 +318,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
const initialViewport = graph.viewportTransform;
|
||||
|
||||
@@ -344,7 +344,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
await graph.clickFirstResourceNode();
|
||||
expect(graph.findingNodes.length).toBeGreaterThan(0);
|
||||
@@ -366,7 +366,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith(fixtures.large(20));
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(16);
|
||||
await graph.waitForGraphStable(16);
|
||||
|
||||
await graph.clickFirstResourceNode();
|
||||
expect(graph.findingNodes.length).toBeGreaterThan(0);
|
||||
@@ -388,7 +388,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
expect(graph.hasNodeDetailsModal).toBe(false);
|
||||
expect(graph.hasNodeActionDialog).toBe(false);
|
||||
@@ -406,13 +406,13 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
|
||||
const fullNodes = graph.nodes.length;
|
||||
await graph.clickFirstFindingNode();
|
||||
await graph.exitFilteredView();
|
||||
await graph.waitForLayoutStable(fullNodes);
|
||||
await graph.waitForGraphStable(fullNodes);
|
||||
expect(graph.isInFilteredView).toBe(false);
|
||||
});
|
||||
|
||||
@@ -420,7 +420,7 @@ describe("exploring the graph", () => {
|
||||
const fixture = fixtures.typical();
|
||||
const graph = await mountWith(fixture);
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
const hoveredNodeId = graph.resourceNodes[0]?.getAttribute("data-id");
|
||||
expect(hoveredNodeId).toBeTruthy();
|
||||
@@ -458,7 +458,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
await graph.clickFirstResourceNodeWithoutFindings();
|
||||
|
||||
@@ -470,7 +470,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
await graph.clickEmptyCanvas();
|
||||
expect(graph.isInFilteredView).toBe(false);
|
||||
@@ -481,7 +481,7 @@ describe("exploring the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
|
||||
await graph.rapidlyClickFirstFindingNode(2);
|
||||
@@ -493,7 +493,7 @@ describe("exploring the graph", () => {
|
||||
test("double-clicking a node doesn't break state", async ({ mountWith }) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
await graph.dblClickFirstResourceNode();
|
||||
expect(graph.nodes.length).toBeGreaterThan(0);
|
||||
@@ -506,7 +506,7 @@ describe("auto-fitting the viewport", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
expect(graph.minimapMaskStrokeWidth).toBeGreaterThan(0);
|
||||
});
|
||||
@@ -516,7 +516,7 @@ describe("auto-fitting the viewport", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
// Given - zoom into the current overview so newly revealed findings can
|
||||
// sit entirely outside the current frame. The expand auto-fit should then
|
||||
@@ -543,7 +543,7 @@ describe("auto-fitting the viewport", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
|
||||
const beforeFilter = graph.viewportTransform;
|
||||
@@ -561,7 +561,7 @@ describe("auto-fitting the viewport", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
await graph.clickFirstFindingNode();
|
||||
expect(graph.isInFilteredView).toBe(true);
|
||||
@@ -569,7 +569,7 @@ describe("auto-fitting the viewport", () => {
|
||||
const filterT = graph.viewportTransform;
|
||||
|
||||
await graph.exitFilteredView();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.waitForTransition();
|
||||
|
||||
expect(graph.viewportTransform).not.toBe(filterT);
|
||||
@@ -582,7 +582,7 @@ describe("exporting the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
|
||||
expect(graph.toolbar.isExportButtonEnabled).toBe(true);
|
||||
});
|
||||
@@ -592,7 +592,7 @@ describe("exporting the graph", () => {
|
||||
}) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
|
||||
const png = await graph.captureExportPNG();
|
||||
@@ -608,13 +608,13 @@ describe("running a different query", () => {
|
||||
test("the previous filtered view is cleared", async ({ mountWith }) => {
|
||||
const graph = await mountWith();
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
await graph.expandAllFindings();
|
||||
await graph.clickFirstFindingNode();
|
||||
expect(graph.isInFilteredView).toBe(true);
|
||||
|
||||
await graph.executeQuery();
|
||||
await graph.waitForLayoutStable(3);
|
||||
await graph.waitForGraphStable(3);
|
||||
expect(graph.isInFilteredView).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -276,13 +276,28 @@ export class AttackPathPageHarness {
|
||||
|
||||
// --- Sync helpers ---
|
||||
|
||||
/** Wait until React Flow has rendered at least `expected` node elements. */
|
||||
async waitForLayoutStable(expected = 1, timeoutMs = 3000): Promise<void> {
|
||||
/**
|
||||
* Wait until React Flow has rendered at least `expectedNodes` node elements
|
||||
* and `expectedEdges` edge elements. React Flow renders edges asynchronously
|
||||
* after nodes are measured via ResizeObserver, so tests that assert on edge
|
||||
* state must opt in to waiting for them — node-only waits race against the
|
||||
* edge measurement pass.
|
||||
*/
|
||||
async waitForGraphStable(
|
||||
expectedNodes = 1,
|
||||
expectedEdges = 0,
|
||||
timeoutMs = 3000,
|
||||
): Promise<void> {
|
||||
await vi.waitFor(
|
||||
() => {
|
||||
if (this.nodes.length < expected) {
|
||||
if (this.nodes.length < expectedNodes) {
|
||||
throw new Error(
|
||||
`expected ${expected} nodes, got ${this.nodes.length}`,
|
||||
`expected ${expectedNodes} nodes, got ${this.nodes.length}`,
|
||||
);
|
||||
}
|
||||
if (this.edges.length < expectedEdges) {
|
||||
throw new Error(
|
||||
`expected ${expectedEdges} edges, got ${this.edges.length}`,
|
||||
);
|
||||
}
|
||||
},
|
||||
@@ -397,7 +412,7 @@ export class AttackPathPageHarness {
|
||||
10000,
|
||||
);
|
||||
await this.user.click(button);
|
||||
await this.waitForLayoutStable(1, 10000);
|
||||
await this.waitForGraphStable(1, 0, 10000);
|
||||
}
|
||||
|
||||
async clickNode(nodeId: string): Promise<void> {
|
||||
|
||||
@@ -109,10 +109,10 @@ export function MyComponent() {
|
||||
|
||||
## Adding New shadcn Components
|
||||
|
||||
When adding new shadcn components using the CLI:
|
||||
When adding new shadcn components using the CLI, pin the reviewed CLI version instead of using `@latest`:
|
||||
|
||||
```bash
|
||||
npx shadcn@latest add [component-name]
|
||||
pnpm dlx shadcn@4.7.0 add [component-name]
|
||||
```
|
||||
|
||||
The component will be automatically added to this directory due to the configuration in `components.json`:
|
||||
|
||||
@@ -67,6 +67,10 @@ const ALLOWED_TOOLS = new Set([
|
||||
"prowler_app_search_security_findings",
|
||||
"prowler_app_get_finding_details",
|
||||
"prowler_app_get_findings_overview",
|
||||
// Finding Groups
|
||||
"prowler_app_list_finding_groups",
|
||||
"prowler_app_get_finding_group_details",
|
||||
"prowler_app_list_finding_group_resources",
|
||||
// Providers
|
||||
"prowler_app_search_providers",
|
||||
// Scans
|
||||
|
||||
@@ -28,6 +28,8 @@
|
||||
"test:e2e:headed": "playwright test --project=auth --project=sign-up --project=providers --project=invitations --project=scans --headed",
|
||||
"test:e2e:report": "playwright show-report",
|
||||
"test:e2e:install": "playwright install",
|
||||
"audit": "pnpm audit --audit-level critical",
|
||||
"audit:high": "pnpm audit --audit-level high",
|
||||
"audit:fix": "pnpm audit fix"
|
||||
},
|
||||
"dependencies": {
|
||||
|
||||