mirror of
https://github.com/prowler-cloud/prowler.git
synced 2026-04-09 11:17:08 +00:00
Compare commits
1 Commits
mintlify/c
...
feat/aspm-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7d99222859 |
316
aspm-manifest.yaml
Normal file
316
aspm-manifest.yaml
Normal file
@@ -0,0 +1,316 @@
|
||||
# Prowler ASPM Agent Manifest
|
||||
# ============================================================
|
||||
# Describe each AI agent deployment and its security posture.
|
||||
# Run: prowler aspm --manifest-path aspm-manifest.yaml
|
||||
#
|
||||
# All fields under each section correspond to ASPM checks.
|
||||
# Set values that accurately reflect your deployment so that
|
||||
# Prowler can generate an accurate posture report.
|
||||
# ============================================================
|
||||
|
||||
agents:
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# Example 1: A well-configured production LLM agent
|
||||
# ----------------------------------------------------------
|
||||
- id: agent-docrecommender-prod
|
||||
name: agent-docrecommender-prod
|
||||
environment: prod
|
||||
cloud_provider: aws
|
||||
region: us-east-1
|
||||
|
||||
# 1.1 Identity & Authentication (ASPM-001 to ASPM-012)
|
||||
identity:
|
||||
type: iam_role
|
||||
arn: arn:aws:iam::123456789012:role/agent-docrecommender-prod
|
||||
tags:
|
||||
agent: "true"
|
||||
owner: team-ai
|
||||
env: prod
|
||||
purpose: document-recommendation
|
||||
criticality: high
|
||||
created_at: "2025-01-15"
|
||||
last_used: "2026-03-20"
|
||||
uses_oidc: true
|
||||
uses_static_credentials: false
|
||||
credential_age_days: 30
|
||||
rotation_policy_days: 30
|
||||
naming_compliant: true
|
||||
has_owner_tag: true
|
||||
cross_cloud_registered: true
|
||||
jwt_validation_enabled: true
|
||||
session_duration_seconds: 3600
|
||||
has_deprovisioning_record: true
|
||||
oauth_scope_minimal: true
|
||||
unused_secondary_credentials: false
|
||||
|
||||
# 1.2 Permissions & Least Privilege (ASPM-013 to ASPM-025)
|
||||
permissions:
|
||||
has_wildcard_actions: false
|
||||
has_wildcard_resources: false
|
||||
has_admin_policy: false
|
||||
has_inline_policies: false
|
||||
can_escalate_privileges: false
|
||||
cross_account_access: false
|
||||
cross_account_accounts: 0
|
||||
has_permission_boundary: true
|
||||
shares_role_with_human: false
|
||||
session_duration_seconds: 3600
|
||||
permissions_last_reviewed_days: 45
|
||||
data_domains_accessed:
|
||||
- s3
|
||||
has_condition_on_sensitive_actions: true
|
||||
all_resources_tagged: true
|
||||
permission_changes_approved: true
|
||||
|
||||
# 1.3 Credential Management (ASPM-026 to ASPM-036)
|
||||
credentials:
|
||||
has_hardcoded_secrets: false
|
||||
credentials_in_logs: false
|
||||
uses_secrets_manager: true
|
||||
api_key_in_vcs: false
|
||||
rotation_interval_days: 30
|
||||
secrets_in_iac: false
|
||||
database_uses_proxy: true
|
||||
third_party_keys_managed: true
|
||||
credential_access_audit_trail: true
|
||||
credentials_scoped: true
|
||||
credentials_per_environment: true
|
||||
|
||||
# 1.4 Network & Communication Security (ASPM-037 to ASPM-046)
|
||||
network:
|
||||
uses_https_only: true
|
||||
mtls_enforced: true
|
||||
api_calls_authenticated: true
|
||||
has_rate_limiting: true
|
||||
has_egress_filtering: true
|
||||
network_isolated: true
|
||||
api_gateway_enforced: true
|
||||
validates_tls_certificates: true
|
||||
network_calls_logged: true
|
||||
uses_dnssec: true
|
||||
validates_webhooks: true
|
||||
|
||||
# 1.5 Data Access & Privacy (ASPM-047 to ASPM-057)
|
||||
data_access:
|
||||
accesses_pii: false
|
||||
has_dlp_controls: true
|
||||
data_encrypted_at_rest: true
|
||||
data_encrypted_in_transit: true
|
||||
cross_boundary_data_flows_approved: true
|
||||
training_data_integrity_verified: true
|
||||
data_retention_policy_days: 90
|
||||
database_query_audit_enabled: true
|
||||
object_storage_access_logged: true
|
||||
llm_context_sanitized: true
|
||||
has_model_card: true
|
||||
output_validated_for_sensitive_data: true
|
||||
supports_data_subject_rights: true
|
||||
|
||||
# 1.6 Runtime & Sandbox Security (ASPM-058 to ASPM-067)
|
||||
runtime:
|
||||
runs_as_root: false
|
||||
privileged_container: false
|
||||
has_seccomp_profile: true
|
||||
has_apparmor_selinux: true
|
||||
has_resource_limits: true
|
||||
image_scanned_for_cves: true
|
||||
has_runtime_monitoring: true
|
||||
execution_environment_versioned: true
|
||||
secrets_cleared_from_memory: true
|
||||
has_execution_timeout: true
|
||||
behavior_deterministic: true
|
||||
dependencies_integrity_checked: true
|
||||
uses_platform_security_controls: true
|
||||
|
||||
# 1.7 Supply Chain Security (ASPM-068 to ASPM-076)
|
||||
supply_chain:
|
||||
framework_cves_scanned: true
|
||||
llm_model_provenance_verified: true
|
||||
plugins_security_reviewed: true
|
||||
dependencies_version_pinned: true
|
||||
artifacts_signed: true
|
||||
cicd_has_security_gates: true
|
||||
licenses_compliant: true
|
||||
model_update_cadence_days: 30
|
||||
dependency_checksums_verified: true
|
||||
|
||||
# 1.8 Observability & Monitoring (ASPM-077 to ASPM-086)
|
||||
observability:
|
||||
execution_logs_complete: true
|
||||
anomaly_detection_enabled: true
|
||||
prompt_injection_monitoring: true
|
||||
audit_logs_immutable: true
|
||||
metrics_exported: true
|
||||
security_event_alerting: true
|
||||
distributed_tracing_enabled: true
|
||||
centralized_dashboard: true
|
||||
configuration_drift_tracked: true
|
||||
performance_baseline_defined: true
|
||||
|
||||
# 1.9 Compliance & Governance (ASPM-087 to ASPM-095)
|
||||
compliance:
|
||||
owasp_llm_top10_assessed: true
|
||||
eu_ai_act_controls_present: true
|
||||
nist_ai_rmf_assessed: true
|
||||
access_control_policy_enforced: true
|
||||
dpia_completed: true
|
||||
regulatory_requirements_mapped: true
|
||||
incident_response_plan_exists: true
|
||||
third_party_vendors_assessed: true
|
||||
user_consent_and_disclosure: true
|
||||
|
||||
# 1.10 Attack Path Analysis (ASPM-096 to ASPM-101)
|
||||
attack_paths:
|
||||
cross_cloud_escalation_possible: false
|
||||
tool_abuse_escalation_possible: false
|
||||
sensitive_data_enables_downstream_compromise: false
|
||||
lateral_movement_via_shared_infra: false
|
||||
compromise_enables_full_account_takeover: false
|
||||
llm_output_used_in_code_execution: false
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# Example 2: A poorly-configured staging agent (many FAILs)
|
||||
# ----------------------------------------------------------
|
||||
- id: agent-dataprocessor-staging
|
||||
name: agent-dataprocessor-staging
|
||||
environment: staging
|
||||
cloud_provider: aws
|
||||
region: eu-west-1
|
||||
|
||||
identity:
|
||||
type: iam_role
|
||||
arn: arn:aws:iam::123456789012:role/DataProcessorRole
|
||||
tags: {} # FAIL: ASPM-001 (no tags)
|
||||
created_at: "2023-06-01"
|
||||
last_used: "2025-12-01"
|
||||
uses_oidc: false # FAIL: ASPM-011
|
||||
uses_static_credentials: true # FAIL: ASPM-011
|
||||
credential_age_days: 660 # FAIL: ASPM-007 (>365 days)
|
||||
rotation_policy_days: null # FAIL: ASPM-004
|
||||
naming_compliant: false # FAIL: ASPM-002 (generic name)
|
||||
has_owner_tag: false # FAIL: ASPM-009
|
||||
cross_cloud_registered: false # FAIL: ASPM-003
|
||||
jwt_validation_enabled: false # FAIL: ASPM-006
|
||||
session_duration_seconds: 43200 # FAIL: ASPM-010 (12 hours)
|
||||
has_deprovisioning_record: false # FAIL: ASPM-008
|
||||
oauth_scope_minimal: false # FAIL: ASPM-005
|
||||
unused_secondary_credentials: true # FAIL: ASPM-012
|
||||
|
||||
permissions:
|
||||
has_wildcard_actions: true # FAIL: ASPM-013, ASPM-016
|
||||
has_wildcard_resources: true # FAIL: ASPM-016
|
||||
has_admin_policy: true # FAIL: ASPM-013
|
||||
has_inline_policies: true # FAIL: ASPM-014
|
||||
can_escalate_privileges: true # FAIL: ASPM-015
|
||||
cross_account_access: true
|
||||
cross_account_accounts: 5 # FAIL: ASPM-017 (>3)
|
||||
has_permission_boundary: false # FAIL: ASPM-022
|
||||
shares_role_with_human: true # FAIL: ASPM-023
|
||||
session_duration_seconds: null # FAIL: ASPM-024
|
||||
permissions_last_reviewed_days: null # FAIL: ASPM-018
|
||||
data_domains_accessed:
|
||||
- s3
|
||||
- rds
|
||||
- redshift # FAIL: ASPM-020 (>1 domain)
|
||||
has_condition_on_sensitive_actions: false # FAIL: ASPM-019
|
||||
all_resources_tagged: false # FAIL: ASPM-021
|
||||
permission_changes_approved: false # FAIL: ASPM-025
|
||||
|
||||
credentials:
|
||||
has_hardcoded_secrets: true # FAIL: ASPM-026
|
||||
credentials_in_logs: true # FAIL: ASPM-027
|
||||
uses_secrets_manager: false # FAIL: ASPM-028
|
||||
api_key_in_vcs: true # FAIL: ASPM-029
|
||||
rotation_interval_days: null # FAIL: ASPM-030
|
||||
secrets_in_iac: true # FAIL: ASPM-031
|
||||
database_uses_proxy: false # FAIL: ASPM-032
|
||||
third_party_keys_managed: false # FAIL: ASPM-033
|
||||
credential_access_audit_trail: false # FAIL: ASPM-034
|
||||
credentials_scoped: false # FAIL: ASPM-035
|
||||
credentials_per_environment: false # FAIL: ASPM-036
|
||||
|
||||
network:
|
||||
uses_https_only: false # FAIL: ASPM-037
|
||||
mtls_enforced: false # FAIL: ASPM-037
|
||||
api_calls_authenticated: false # FAIL: ASPM-038
|
||||
has_rate_limiting: false # FAIL: ASPM-039
|
||||
has_egress_filtering: false # FAIL: ASPM-040
|
||||
network_isolated: false # FAIL: ASPM-041
|
||||
api_gateway_enforced: false # FAIL: ASPM-042
|
||||
validates_tls_certificates: false # FAIL: ASPM-043
|
||||
network_calls_logged: false # FAIL: ASPM-044
|
||||
uses_dnssec: false # FAIL: ASPM-045
|
||||
validates_webhooks: false # FAIL: ASPM-046
|
||||
|
||||
data_access:
|
||||
accesses_pii: true
|
||||
has_dlp_controls: false # FAIL: ASPM-047
|
||||
data_encrypted_at_rest: false # FAIL: ASPM-048
|
||||
data_encrypted_in_transit: false # FAIL: ASPM-048
|
||||
cross_boundary_data_flows_approved: false # FAIL: ASPM-049
|
||||
training_data_integrity_verified: false # FAIL: ASPM-050
|
||||
data_retention_policy_days: null # FAIL: ASPM-051
|
||||
database_query_audit_enabled: false # FAIL: ASPM-052
|
||||
object_storage_access_logged: false # FAIL: ASPM-053
|
||||
llm_context_sanitized: false # FAIL: ASPM-054
|
||||
has_model_card: false # FAIL: ASPM-055
|
||||
output_validated_for_sensitive_data: false # FAIL: ASPM-056
|
||||
supports_data_subject_rights: false # FAIL: ASPM-057
|
||||
|
||||
runtime:
|
||||
runs_as_root: true # FAIL: ASPM-058
|
||||
privileged_container: true # FAIL: ASPM-058
|
||||
has_seccomp_profile: false # FAIL: ASPM-058
|
||||
has_apparmor_selinux: false # FAIL: ASPM-058
|
||||
has_resource_limits: false # FAIL: ASPM-059
|
||||
image_scanned_for_cves: false # FAIL: ASPM-060
|
||||
has_runtime_monitoring: false # FAIL: ASPM-061
|
||||
execution_environment_versioned: false # FAIL: ASPM-062
|
||||
secrets_cleared_from_memory: false # FAIL: ASPM-063
|
||||
has_execution_timeout: false # FAIL: ASPM-064
|
||||
behavior_deterministic: false # FAIL: ASPM-065
|
||||
dependencies_integrity_checked: false # FAIL: ASPM-066
|
||||
uses_platform_security_controls: false # FAIL: ASPM-067
|
||||
|
||||
supply_chain:
|
||||
framework_cves_scanned: false # FAIL: ASPM-068
|
||||
llm_model_provenance_verified: false # FAIL: ASPM-069
|
||||
plugins_security_reviewed: false # FAIL: ASPM-070
|
||||
dependencies_version_pinned: false # FAIL: ASPM-071
|
||||
artifacts_signed: false # FAIL: ASPM-072
|
||||
cicd_has_security_gates: false # FAIL: ASPM-073
|
||||
licenses_compliant: false # FAIL: ASPM-074
|
||||
model_update_cadence_days: null # FAIL: ASPM-075
|
||||
dependency_checksums_verified: false # FAIL: ASPM-076
|
||||
|
||||
observability:
|
||||
execution_logs_complete: false # FAIL: ASPM-077
|
||||
anomaly_detection_enabled: false # FAIL: ASPM-078
|
||||
prompt_injection_monitoring: false # FAIL: ASPM-079
|
||||
audit_logs_immutable: false # FAIL: ASPM-080
|
||||
metrics_exported: false # FAIL: ASPM-081
|
||||
security_event_alerting: false # FAIL: ASPM-082
|
||||
distributed_tracing_enabled: false # FAIL: ASPM-083
|
||||
centralized_dashboard: false # FAIL: ASPM-084
|
||||
configuration_drift_tracked: false # FAIL: ASPM-085
|
||||
performance_baseline_defined: false # FAIL: ASPM-086
|
||||
|
||||
compliance:
|
||||
owasp_llm_top10_assessed: false # FAIL: ASPM-087
|
||||
eu_ai_act_controls_present: false # FAIL: ASPM-088
|
||||
nist_ai_rmf_assessed: false # FAIL: ASPM-089
|
||||
access_control_policy_enforced: false # FAIL: ASPM-090
|
||||
dpia_completed: false # FAIL: ASPM-091
|
||||
regulatory_requirements_mapped: false # FAIL: ASPM-092
|
||||
incident_response_plan_exists: false # FAIL: ASPM-093
|
||||
third_party_vendors_assessed: false # FAIL: ASPM-094
|
||||
user_consent_and_disclosure: false # FAIL: ASPM-095
|
||||
|
||||
attack_paths:
|
||||
cross_cloud_escalation_possible: true # FAIL: ASPM-096
|
||||
tool_abuse_escalation_possible: true # FAIL: ASPM-097
|
||||
sensitive_data_enables_downstream_compromise: true # FAIL: ASPM-098
|
||||
lateral_movement_via_shared_infra: true # FAIL: ASPM-099
|
||||
compromise_enables_full_account_takeover: true # FAIL: ASPM-100
|
||||
llm_output_used_in_code_execution: true # FAIL: ASPM-101
|
||||
@@ -2,6 +2,14 @@
|
||||
|
||||
All notable changes to the **Prowler SDK** are documented in this file.
|
||||
|
||||
## [5.22.0] (Prowler UNRELEASED)
|
||||
|
||||
### 🚀 Added
|
||||
|
||||
- New `aspm` provider for Agent Security Posture Management with 101 checks across 10 categories covering identity, permissions, credentials, network, data access, runtime, supply chain, observability, compliance, and attack path analysis for AI agent deployments
|
||||
|
||||
---
|
||||
|
||||
## [5.21.0] (Prowler v5.21.0)
|
||||
|
||||
### 🚀 Added
|
||||
|
||||
@@ -1240,6 +1240,38 @@ class CheckReportMongoDBAtlas(Check_Report):
|
||||
self.location = getattr(resource, "location", self.project_id)
|
||||
|
||||
|
||||
@dataclass
class CheckReportASPM(Check_Report):
    """Contains the ASPM Check's finding information.

    Attributes:
        resource_name: Human-readable agent name.
        resource_id: Unique agent identifier.
        environment: Deployment environment (prod/staging/dev).
        cloud_provider: Cloud provider (aws/azure/gcp).
    """

    resource_name: str
    resource_id: str
    environment: str
    cloud_provider: str

    def __init__(self, metadata: Dict, resource: Any) -> None:
        """Populate the finding fields from an agent resource.

        Args:
            metadata: The check metadata.
            resource: An AgentConfig instance or compatible dict.
        """
        super().__init__(metadata, resource)
        # Fall back to the generic Check_Report-style attribute names when
        # the resource does not expose "name"/"id" directly.
        name_fallback = getattr(resource, "resource_name", "")
        self.resource_name = getattr(resource, "name", name_fallback)
        id_fallback = getattr(resource, "resource_id", "")
        self.resource_id = getattr(resource, "id", id_fallback)
        self.environment = getattr(resource, "environment", "unknown")
        self.cloud_provider = getattr(resource, "cloud_provider", "unknown")
|
||||
|
||||
|
||||
# Testing Pending
|
||||
def load_check_metadata(metadata_file: str) -> CheckMetadata:
|
||||
"""
|
||||
|
||||
@@ -27,10 +27,10 @@ class ProwlerArgumentParser:
|
||||
self.parser = argparse.ArgumentParser(
|
||||
prog="prowler",
|
||||
formatter_class=RawTextHelpFormatter,
|
||||
usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,dashboard,iac,image} ...",
|
||||
usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,dashboard,iac,image,aspm} ...",
|
||||
epilog="""
|
||||
Available Cloud Providers:
|
||||
{aws,azure,gcp,kubernetes,m365,github,googleworkspace,iac,llm,image,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack}
|
||||
{aws,azure,gcp,kubernetes,m365,github,googleworkspace,iac,llm,image,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,aspm}
|
||||
aws AWS Provider
|
||||
azure Azure Provider
|
||||
gcp GCP Provider
|
||||
@@ -45,6 +45,7 @@ Available Cloud Providers:
|
||||
iac IaC Provider (Beta)
|
||||
llm LLM Provider (Beta)
|
||||
image Container Image Provider
|
||||
aspm Agent Security Posture Management (ASPM) Provider (Beta)
|
||||
nhn NHN Provider (Unofficial)
|
||||
mongodbatlas MongoDB Atlas Provider (Beta)
|
||||
|
||||
@@ -426,3 +427,31 @@ Detailed documentation at https://docs.prowler.com
|
||||
action="store_true",
|
||||
help="Send a summary of the execution with a Slack APP in your channel. Environment variables SLACK_API_TOKEN and SLACK_CHANNEL_NAME are required (see more in https://docs.prowler.com/user-guide/cli/tutorials/integrations#configuration-of-the-integration-with-slack/).",
|
||||
)
|
||||
# Datadog Integration
|
||||
datadog_subparser = self.common_providers_parser.add_argument_group(
|
||||
"Datadog Integration"
|
||||
)
|
||||
datadog_subparser.add_argument(
|
||||
"--datadog",
|
||||
action="store_true",
|
||||
help="Send findings to Datadog via the Logs API. Requires --datadog-api-key or the DATADOG_API_KEY environment variable.",
|
||||
)
|
||||
datadog_subparser.add_argument(
|
||||
"--datadog-api-key",
|
||||
nargs="?",
|
||||
default=None,
|
||||
metavar="DATADOG_API_KEY",
|
||||
help="Datadog API key. Can also be set via the DATADOG_API_KEY environment variable.",
|
||||
)
|
||||
datadog_subparser.add_argument(
|
||||
"--datadog-site",
|
||||
nargs="?",
|
||||
default="datadoghq.com",
|
||||
metavar="DATADOG_SITE",
|
||||
help="Datadog site to send findings to (default: datadoghq.com). Options: datadoghq.com, us3.datadoghq.com, us5.datadoghq.com, datadoghq.eu, ap1.datadoghq.com, ddog-gov.com.",
|
||||
)
|
||||
datadog_subparser.add_argument(
|
||||
"--send-dd-only-fails",
|
||||
action="store_true",
|
||||
help="Send only FAIL findings to Datadog.",
|
||||
)
|
||||
|
||||
0
prowler/providers/aspm/__init__.py
Normal file
0
prowler/providers/aspm/__init__.py
Normal file
316
prowler/providers/aspm/aspm_provider.py
Normal file
316
prowler/providers/aspm/aspm_provider.py
Normal file
@@ -0,0 +1,316 @@
|
||||
"""ASPM (Agent Security Posture Management) Provider.
|
||||
|
||||
Reads an agent manifest file (YAML or JSON) that describes the security
|
||||
configuration of deployed AI agents and exposes the parsed agent list to the
|
||||
check engine.
|
||||
|
||||
Manifest format (YAML):
|
||||
|
||||
agents:
|
||||
- id: agent-001
|
||||
name: agent-docrecommender-prod
|
||||
environment: prod # prod | staging | dev
|
||||
cloud_provider: aws # aws | azure | gcp
|
||||
region: us-east-1
|
||||
|
||||
identity:
|
||||
type: iam_role
|
||||
arn: arn:aws:iam::123456789012:role/agent-docrecommender-prod
|
||||
tags:
|
||||
agent: "true"
|
||||
owner: team-ai
|
||||
env: prod
|
||||
purpose: document-recommendation
|
||||
created_at: "2025-01-15"
|
||||
last_used: "2026-03-01"
|
||||
uses_oidc: true
|
||||
uses_static_credentials: false
|
||||
credential_age_days: 45
|
||||
rotation_policy_days: 90
|
||||
naming_compliant: true
|
||||
has_owner_tag: true
|
||||
session_duration_seconds: 3600
|
||||
|
||||
permissions:
|
||||
has_wildcard_actions: false
|
||||
has_admin_policy: false
|
||||
has_permission_boundary: true
|
||||
shares_role_with_human: false
|
||||
data_domains_accessed: ["s3"]
|
||||
|
||||
credentials:
|
||||
uses_secrets_manager: true
|
||||
has_hardcoded_secrets: false
|
||||
rotation_interval_days: 30
|
||||
credentials_per_environment: true
|
||||
|
||||
network:
|
||||
uses_https_only: true
|
||||
has_egress_filtering: true
|
||||
has_rate_limiting: true
|
||||
validates_tls_certificates: true
|
||||
|
||||
data_access:
|
||||
accesses_pii: false
|
||||
data_encrypted_at_rest: true
|
||||
data_encrypted_in_transit: true
|
||||
|
||||
runtime:
|
||||
runs_as_root: false
|
||||
privileged_container: false
|
||||
has_resource_limits: true
|
||||
image_scanned_for_cves: true
|
||||
|
||||
supply_chain:
|
||||
framework_cves_scanned: true
|
||||
dependencies_version_pinned: true
|
||||
artifacts_signed: true
|
||||
|
||||
observability:
|
||||
execution_logs_complete: true
|
||||
audit_logs_immutable: true
|
||||
security_event_alerting: true
|
||||
|
||||
compliance:
|
||||
owasp_llm_top10_assessed: true
|
||||
incident_response_plan_exists: true
|
||||
|
||||
attack_paths:
|
||||
cross_cloud_escalation_possible: false
|
||||
compromise_enables_full_account_takeover: false
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
from typing import List, Optional
|
||||
|
||||
import yaml
|
||||
from colorama import Fore, Style
|
||||
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.lib.utils.utils import print_boxes
|
||||
from prowler.providers.aspm.exceptions.exceptions import (
|
||||
ASPMManifestInvalidError,
|
||||
ASPMManifestNotFoundError,
|
||||
ASPMNoAgentsFoundError,
|
||||
)
|
||||
from prowler.providers.aspm.models import AgentConfig
|
||||
from prowler.providers.common.models import Audit_Metadata
|
||||
from prowler.providers.common.provider import Provider
|
||||
|
||||
|
||||
class AspmProvider(Provider):
    """Provider for AI Agent Security Posture Management (ASPM).

    Parses an agent manifest file and exposes the list of agent configurations
    to the check engine.

    Attributes:
        _type: Provider type identifier ("aspm").
        manifest_path: Path to the agent manifest file.
        agents: Parsed and validated list of AgentConfig objects.
        environment_filter: Optional environment filter (prod/staging/dev).
        cloud_provider_filter: Optional cloud provider filter (aws/azure/gcp).
        audit_metadata: Prowler audit metadata.
    """

    _type: str = "aspm"
    audit_metadata: Audit_Metadata

    def __init__(
        self,
        manifest_path: str = "aspm-manifest.yaml",
        environment: Optional[str] = None,
        cloud_provider: Optional[str] = None,
        config_path: Optional[str] = None,
        config_content: Optional[dict] = None,
        fixer_config: Optional[dict] = None,
        provider_uid: Optional[str] = None,
    ) -> None:
        """Initialise the ASPM provider.

        Args:
            manifest_path: Path to the YAML/JSON agent manifest file.
            environment: Optional filter — only assess agents in this env.
            cloud_provider: Optional filter — only assess agents on this cloud.
            config_path: Prowler global config file path.
            config_content: Prowler global config as a dict.
            fixer_config: Fixer configuration (None means an empty config;
                the default used to be a shared mutable ``{}``).
            provider_uid: Unique identifier for push-to-cloud integration.
        """
        logger.info("Instantiating ASPM Provider...")

        self.manifest_path = manifest_path
        self.environment_filter = environment
        self.cloud_provider_filter = cloud_provider
        self._provider_uid = provider_uid
        self._session = None
        self._identity = "prowler"
        self._auth_method = "No auth"
        self.region = "global"
        self.audited_account = "local-aspm"

        # Load and parse the manifest
        self.agents: List[AgentConfig] = self._load_manifest()

        # Audit config — imported locally (presumably to avoid a circular
        # import with prowler.config.config at module load time; confirm).
        from prowler.config.config import (
            default_config_file_path,
            load_and_validate_config_file,
        )

        if config_content:
            self._audit_config = config_content
        elif config_path and config_path != default_config_file_path:
            self._audit_config = load_and_validate_config_file(self._type, config_path)
        else:
            self._audit_config = {}

        # A mutable {} default argument would be shared across every provider
        # instantiation, so materialise the empty dict here instead.
        self._fixer_config = fixer_config if fixer_config is not None else {}
        self._mutelist = None

        self.audit_metadata = Audit_Metadata(
            provider=self._type,
            account_id=self.audited_account,
            account_name="aspm",
            region=self.region,
            services_scanned=0,
            expected_checks=[],
            completed_checks=0,
            audit_progress=0,
        )

        Provider.set_global_provider(self)

    # ------------------------------------------------------------------
    # Provider interface (abstract method implementations)
    # ------------------------------------------------------------------

    @property
    def type(self) -> str:
        """Provider type identifier."""
        return self._type

    @property
    def identity(self) -> str:
        """Provider identity string."""
        return self._identity

    @property
    def session(self):
        """ASPM provider has no cloud session."""
        return self._session

    @property
    def audit_config(self) -> dict:
        """Prowler audit configuration."""
        return self._audit_config

    @property
    def fixer_config(self) -> dict:
        """Fixer configuration."""
        return self._fixer_config

    @property
    def auth_method(self) -> str:
        """Authentication method description."""
        return self._auth_method

    def setup_session(self) -> None:
        """ASPM provider does not require a cloud session."""

    def print_credentials(self) -> None:
        """Display provider summary in the CLI output."""
        report_title = (
            f"{Style.BRIGHT}Scanning AI Agent Security Posture:{Style.RESET_ALL}"
        )
        report_lines = [
            f"Manifest: {Fore.YELLOW}{self.manifest_path}{Style.RESET_ALL}",
            f"Agents loaded: {Fore.YELLOW}{len(self.agents)}{Style.RESET_ALL}",
        ]
        if self.environment_filter:
            report_lines.append(
                f"Environment filter: {Fore.YELLOW}{self.environment_filter}{Style.RESET_ALL}"
            )
        if self.cloud_provider_filter:
            report_lines.append(
                f"Cloud provider filter: {Fore.YELLOW}{self.cloud_provider_filter}{Style.RESET_ALL}"
            )
        print_boxes(report_lines, report_title)

    # ------------------------------------------------------------------
    # Manifest loading
    # ------------------------------------------------------------------

    def _load_manifest(self) -> List[AgentConfig]:
        """Load and parse the agent manifest file.

        Returns:
            A list of validated AgentConfig objects.

        Raises:
            SystemExit: On unrecoverable manifest errors (missing file,
                unparsable content, or a missing/non-list 'agents' key).
        """
        import os

        if not os.path.exists(self.manifest_path):
            logger.critical(ASPMManifestNotFoundError(self.manifest_path).message)
            sys.exit(1)

        try:
            with open(self.manifest_path, "r", encoding="utf-8") as fh:
                # The file extension selects the parser; anything that is not
                # .json is treated as YAML.
                if self.manifest_path.endswith(".json"):
                    raw = json.load(fh)
                else:
                    raw = yaml.safe_load(fh)
        except Exception as exc:
            logger.critical(
                ASPMManifestInvalidError(self.manifest_path, str(exc)).message
            )
            sys.exit(1)

        if not isinstance(raw, dict) or "agents" not in raw:
            logger.critical(
                ASPMManifestInvalidError(
                    self.manifest_path,
                    "Root key 'agents' not found. "
                    "The manifest must contain a top-level 'agents' list.",
                ).message
            )
            sys.exit(1)

        raw_agents = raw.get("agents", [])
        # 'agents' must be a list; reject scalars/mappings early, otherwise
        # iterating (e.g. over a dict's keys) would feed garbage to
        # AgentConfig(**entry) below.
        if raw_agents and not isinstance(raw_agents, list):
            logger.critical(
                ASPMManifestInvalidError(
                    self.manifest_path,
                    "The top-level 'agents' key must contain a list.",
                ).message
            )
            sys.exit(1)
        if not raw_agents:
            logger.critical(ASPMNoAgentsFoundError().message)
            sys.exit(1)

        agents: List[AgentConfig] = []
        for entry in raw_agents:
            try:
                agent = AgentConfig(**entry)
                # Apply optional filters
                if (
                    self.environment_filter
                    and agent.environment != self.environment_filter
                ):
                    continue
                if (
                    self.cloud_provider_filter
                    and agent.cloud_provider != self.cloud_provider_filter
                ):
                    continue
                agents.append(agent)
            except Exception as exc:
                # entry may not be a mapping at all, so guard the id lookup —
                # otherwise the error handler itself would raise.
                agent_id = (
                    entry.get("id", "<unknown>")
                    if isinstance(entry, dict)
                    else "<unknown>"
                )
                logger.error(
                    f"Skipping agent '{agent_id}' — manifest validation error: {exc}"
                )

        if not agents:
            logger.warning(
                "No agents matched the specified filters. "
                "The assessment will produce no findings."
            )

        logger.info(f"Loaded {len(agents)} agent(s) from {self.manifest_path}")
        return agents
|
||||
0
prowler/providers/aspm/exceptions/__init__.py
Normal file
0
prowler/providers/aspm/exceptions/__init__.py
Normal file
36
prowler/providers/aspm/exceptions/exceptions.py
Normal file
36
prowler/providers/aspm/exceptions/exceptions.py
Normal file
@@ -0,0 +1,36 @@
|
||||
"""ASPM Provider exceptions."""
|
||||
|
||||
|
||||
class ASPMBaseException(Exception):
    """Common base for every ASPM provider exception.

    Exposes the human-readable text via the ``message`` attribute so callers
    can log it directly (e.g. ``logger.critical(exc.message)``).
    """

    def __init__(self, message: str = ""):
        self.message = message
        super().__init__(message)
|
||||
|
||||
|
||||
class ASPMManifestNotFoundError(ASPMBaseException):
    """Raised when the ASPM agent manifest file is not found."""

    def __init__(self, path: str):
        message = f"ASPM manifest file not found: {path}"
        super().__init__(message)
|
||||
|
||||
|
||||
class ASPMManifestInvalidError(ASPMBaseException):
    """Raised when the ASPM agent manifest file cannot be parsed."""

    def __init__(self, path: str, detail: str = ""):
        # The optional detail (parser error, schema hint) is appended after
        # an em dash to keep the base message stable for matching/logging.
        message = f"ASPM manifest file is invalid: {path}"
        if detail:
            message = f"{message} — {detail}"
        super().__init__(message)
|
||||
|
||||
|
||||
class ASPMNoAgentsFoundError(ASPMBaseException):
    """Raised when the manifest contains no agents to assess."""

    def __init__(self):
        message = (
            "No agents found in the ASPM manifest. "
            "Ensure the manifest contains at least one entry under 'agents'."
        )
        super().__init__(message)
|
||||
0
prowler/providers/aspm/lib/__init__.py
Normal file
0
prowler/providers/aspm/lib/__init__.py
Normal file
0
prowler/providers/aspm/lib/arguments/__init__.py
Normal file
0
prowler/providers/aspm/lib/arguments/__init__.py
Normal file
67
prowler/providers/aspm/lib/arguments/arguments.py
Normal file
67
prowler/providers/aspm/lib/arguments/arguments.py
Normal file
@@ -0,0 +1,67 @@
|
||||
"""ASPM Provider CLI argument definitions."""
|
||||
|
||||
|
||||
def init_parser(self):
    """Init the ASPM Provider CLI parser."""
    aspm_parser = self.subparsers.add_parser(
        "aspm",
        parents=[self.common_providers_parser],
        help="Agent Security Posture Management (ASPM) Provider (Beta)",
    )

    scan_options = aspm_parser.add_argument_group("ASPM Scan Options")

    # Manifest file location (YAML or JSON).
    scan_options.add_argument(
        "--manifest-path",
        "-M",
        dest="manifest_path",
        default="aspm-manifest.yaml",
        help=(
            "Path to the ASPM agent manifest file (YAML or JSON) describing "
            "deployed AI agent security configurations. "
            "Default: aspm-manifest.yaml"
        ),
    )

    # Optional scoping filters share the same shape, so declare them as a
    # table of (flag, dest, choices, help) and register them in one loop.
    optional_filters = (
        (
            "--environment",
            "environment",
            ["prod", "staging", "dev"],
            "Filter the assessment to agents in a specific environment. "
            "Default: all environments.",
        ),
        (
            "--cloud-provider",
            "cloud_provider",
            ["aws", "azure", "gcp"],
            "Filter the assessment to agents running on a specific cloud provider. "
            "Default: all cloud providers.",
        ),
    )
    for flag, dest, choices, help_text in optional_filters:
        scan_options.add_argument(
            flag,
            dest=dest,
            default=None,
            choices=choices,
            help=help_text,
        )

    scan_options.add_argument(
        "--provider-uid",
        dest="provider_uid",
        default=None,
        help="Unique identifier for this ASPM scan (used with --push-to-cloud).",
    )
|
||||
|
||||
|
||||
def validate_arguments(arguments):
    """Validate ASPM provider arguments.

    Returns a ``(valid, error_message)`` tuple; the message is empty when
    the arguments are valid.
    """
    import os

    manifest_path = getattr(arguments, "manifest_path", "aspm-manifest.yaml")
    if os.path.exists(manifest_path):
        return (True, "")
    return (
        False,
        f"ASPM manifest file not found: '{manifest_path}'. "
        "Use --manifest-path to specify the correct path.",
    )
|
||||
0
prowler/providers/aspm/lib/service/__init__.py
Normal file
0
prowler/providers/aspm/lib/service/__init__.py
Normal file
28
prowler/providers/aspm/lib/service/service.py
Normal file
28
prowler/providers/aspm/lib/service/service.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""ASPM base service class."""
|
||||
|
||||
from prowler.lib.logger import logger
|
||||
from prowler.providers.aspm.aspm_provider import AspmProvider
|
||||
|
||||
|
||||
class AspmService:
|
||||
"""Base class for all ASPM services.
|
||||
|
||||
Each subclass is responsible for a specific check category (identity,
|
||||
permissions, credentials, …). On construction the service receives the
|
||||
global AspmProvider instance and exposes the filtered list of agents that
|
||||
the checks iterate over.
|
||||
|
||||
Attributes:
|
||||
provider: The active AspmProvider instance.
|
||||
agents: The list of AgentConfig objects to assess.
|
||||
"""
|
||||
|
||||
def __init__(self, provider: AspmProvider) -> None:
|
||||
"""Initialise the service with a reference to the provider.
|
||||
|
||||
Args:
|
||||
provider: The active AspmProvider instance.
|
||||
"""
|
||||
logger.info(f"Initialising {self.__class__.__name__}...")
|
||||
self.provider = provider
|
||||
self.agents = provider.agents
|
||||
573
prowler/providers/aspm/models.py
Normal file
573
prowler/providers/aspm/models.py
Normal file
@@ -0,0 +1,573 @@
|
||||
"""ASPM Provider data models.
|
||||
|
||||
These models represent the security posture of AI agent deployments as
|
||||
declared in an agent manifest file (YAML/JSON). Each model field
|
||||
corresponds directly to a check category defined in the ASPM check suite.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import date
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from prowler.config.config import output_file_timestamp
|
||||
from prowler.providers.common.models import ProviderOutputOptions
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Sub-models per check category
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class AgentIdentityConfig(BaseModel):
|
||||
"""Identity & Authentication configuration for an AI agent."""
|
||||
|
||||
type: str = Field(
|
||||
default="iam_role",
|
||||
description="Credential type: iam_role | managed_identity | service_account | api_key",
|
||||
)
|
||||
arn: Optional[str] = Field(default=None, description="Full ARN / resource ID")
|
||||
tags: Dict[str, str] = Field(
|
||||
default_factory=dict,
|
||||
description="Tags applied to the identity resource",
|
||||
)
|
||||
created_at: Optional[date] = Field(default=None, description="Creation date")
|
||||
last_used: Optional[date] = Field(
|
||||
default=None, description="Last authentication date"
|
||||
)
|
||||
uses_oidc: bool = Field(
|
||||
default=False,
|
||||
description="Whether OIDC/Workload Identity federation is used instead of static keys",
|
||||
)
|
||||
uses_static_credentials: bool = Field(
|
||||
default=True,
|
||||
description="Whether static (long-lived) credentials are used",
|
||||
)
|
||||
credential_age_days: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Age of the credentials in days (None = unknown)",
|
||||
)
|
||||
rotation_policy_days: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Maximum allowed credential age before rotation (days)",
|
||||
)
|
||||
naming_compliant: bool = Field(
|
||||
default=True,
|
||||
description="Whether the identity name follows organisational naming conventions",
|
||||
)
|
||||
has_owner_tag: bool = Field(
|
||||
default=False,
|
||||
description="Whether the identity has an 'owner' tag linking it to a team",
|
||||
)
|
||||
cross_cloud_registered: bool = Field(
|
||||
default=True,
|
||||
description="For multi-cloud: whether the identity is registered in all target clouds",
|
||||
)
|
||||
jwt_validation_enabled: bool = Field(
|
||||
default=False,
|
||||
description="Whether agent-to-agent JWT claims (exp, iss, sub, aud) are validated",
|
||||
)
|
||||
session_duration_seconds: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Max assumed-role session duration in seconds (None = unlimited)",
|
||||
)
|
||||
has_deprovisioning_record: bool = Field(
|
||||
default=True,
|
||||
description="Whether a deprovisioning record / SOP exists for this identity",
|
||||
)
|
||||
oauth_scope_minimal: bool = Field(
|
||||
default=True,
|
||||
description="Whether OAuth tokens are requested with minimal required scopes",
|
||||
)
|
||||
unused_secondary_credentials: bool = Field(
|
||||
default=False,
|
||||
description="Whether unused secondary credentials (backup keys) exist",
|
||||
)
|
||||
|
||||
|
||||
class AgentPermissionsConfig(BaseModel):
|
||||
"""Permissions & Least Privilege configuration for an AI agent."""
|
||||
|
||||
has_wildcard_actions: bool = Field(
|
||||
default=False,
|
||||
description="Whether any policy grants wildcard actions (s3:*, *:*)",
|
||||
)
|
||||
has_wildcard_resources: bool = Field(
|
||||
default=False,
|
||||
description="Whether any policy grants wildcard resource ARNs (*)",
|
||||
)
|
||||
has_admin_policy: bool = Field(
|
||||
default=False,
|
||||
description="Whether an admin or power-user managed policy is attached",
|
||||
)
|
||||
has_inline_policies: bool = Field(
|
||||
default=False,
|
||||
description="Whether inline policies (instead of managed) are attached",
|
||||
)
|
||||
can_escalate_privileges: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent can escalate to human admin roles",
|
||||
)
|
||||
cross_account_access: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent has cross-account permissions",
|
||||
)
|
||||
cross_account_accounts: int = Field(
|
||||
default=0,
|
||||
description="Number of accounts the agent can access cross-account",
|
||||
)
|
||||
has_permission_boundary: bool = Field(
|
||||
default=False,
|
||||
description="Whether a permission boundary enforces the maximum permission set",
|
||||
)
|
||||
shares_role_with_human: bool = Field(
|
||||
default=False,
|
||||
description="Whether humans share the same role as this agent",
|
||||
)
|
||||
session_duration_seconds: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Max session duration in seconds for assumed roles",
|
||||
)
|
||||
permissions_last_reviewed_days: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Days since permissions were last reviewed (None = never)",
|
||||
)
|
||||
data_domains_accessed: List[str] = Field(
|
||||
default_factory=list,
|
||||
description="List of data domains accessible (e.g. ['s3', 'rds', 'redshift'])",
|
||||
)
|
||||
has_condition_on_sensitive_actions: bool = Field(
|
||||
default=True,
|
||||
description="Whether conditions (IP, tag, time) restrict high-risk permissions",
|
||||
)
|
||||
all_resources_tagged: bool = Field(
|
||||
default=False,
|
||||
description="Whether agent service principals carry all required governance tags",
|
||||
)
|
||||
permission_changes_approved: bool = Field(
|
||||
default=True,
|
||||
description="Whether all permission changes are traceable to approved change requests",
|
||||
)
|
||||
|
||||
|
||||
class AgentCredentialsConfig(BaseModel):
|
||||
"""Credential Management configuration for an AI agent."""
|
||||
|
||||
has_hardcoded_secrets: bool = Field(
|
||||
default=False,
|
||||
description="Whether hardcoded credentials exist in code, IaC, or manifests",
|
||||
)
|
||||
credentials_in_logs: bool = Field(
|
||||
default=False,
|
||||
description="Whether credentials appear in logs or error messages",
|
||||
)
|
||||
uses_secrets_manager: bool = Field(
|
||||
default=False,
|
||||
description="Whether credentials are retrieved from a cloud secrets manager",
|
||||
)
|
||||
api_key_in_vcs: bool = Field(
|
||||
default=False,
|
||||
description="Whether API keys or tokens have been committed to version control",
|
||||
)
|
||||
rotation_interval_days: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Actual credential rotation interval in days (None = no rotation)",
|
||||
)
|
||||
secrets_in_iac: bool = Field(
|
||||
default=False,
|
||||
description="Whether secrets are embedded in Terraform / CloudFormation",
|
||||
)
|
||||
database_uses_proxy: bool = Field(
|
||||
default=False,
|
||||
description="Whether database connections use a managed proxy (RDS Proxy / Cloud SQL Proxy)",
|
||||
)
|
||||
third_party_keys_managed: bool = Field(
|
||||
default=True,
|
||||
description="Whether third-party API keys (Slack, GitHub, etc.) are in secrets manager",
|
||||
)
|
||||
credential_access_audit_trail: bool = Field(
|
||||
default=False,
|
||||
description="Whether credential access (GetSecretValue, etc.) is logged and monitored",
|
||||
)
|
||||
credentials_scoped: bool = Field(
|
||||
default=True,
|
||||
description="Whether credentials have minimal scope (not admin/full-access)",
|
||||
)
|
||||
credentials_per_environment: bool = Field(
|
||||
default=True,
|
||||
description="Whether separate credentials are used per environment (dev/staging/prod)",
|
||||
)
|
||||
|
||||
|
||||
class AgentNetworkConfig(BaseModel):
|
||||
"""Network & Communication Security configuration for an AI agent."""
|
||||
|
||||
uses_https_only: bool = Field(
|
||||
default=True,
|
||||
description="Whether all agent API calls use HTTPS / TLS 1.2+",
|
||||
)
|
||||
mtls_enforced: bool = Field(
|
||||
default=False,
|
||||
description="Whether mTLS is enforced in the service mesh for agent-to-agent communication",
|
||||
)
|
||||
api_calls_authenticated: bool = Field(
|
||||
default=True,
|
||||
description="Whether all internal API calls require authentication",
|
||||
)
|
||||
has_rate_limiting: bool = Field(
|
||||
default=False,
|
||||
description="Whether rate limiting is configured on agent API endpoints",
|
||||
)
|
||||
has_egress_filtering: bool = Field(
|
||||
default=False,
|
||||
description="Whether outbound network access is filtered by destination",
|
||||
)
|
||||
network_isolated: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent runs in an isolated network segment",
|
||||
)
|
||||
api_gateway_enforced: bool = Field(
|
||||
default=True,
|
||||
description="Whether all API access routes through an authenticated API Gateway",
|
||||
)
|
||||
validates_tls_certificates: bool = Field(
|
||||
default=True,
|
||||
description="Whether TLS certificates are fully validated (chain, hostname, expiry)",
|
||||
)
|
||||
network_calls_logged: bool = Field(
|
||||
default=False,
|
||||
description="Whether all network calls are logged with source agent ID and destination",
|
||||
)
|
||||
uses_dnssec: bool = Field(
|
||||
default=False,
|
||||
description="Whether DNS queries use DNSSEC / DoH / DoT",
|
||||
)
|
||||
validates_webhooks: bool = Field(
|
||||
default=True,
|
||||
description="Whether incoming webhooks/callbacks are signature-validated",
|
||||
)
|
||||
|
||||
|
||||
class AgentDataAccessConfig(BaseModel):
|
||||
"""Data Access & Privacy configuration for an AI agent."""
|
||||
|
||||
accesses_pii: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent can access Personally Identifiable Information",
|
||||
)
|
||||
has_dlp_controls: bool = Field(
|
||||
default=False,
|
||||
description="Whether Data Loss Prevention controls are enforced on PII access",
|
||||
)
|
||||
data_encrypted_at_rest: bool = Field(
|
||||
default=True,
|
||||
description="Whether all data stores accessed by the agent use encryption at rest",
|
||||
)
|
||||
data_encrypted_in_transit: bool = Field(
|
||||
default=True,
|
||||
description="Whether all data in transit is encrypted (TLS)",
|
||||
)
|
||||
cross_boundary_data_flows_approved: bool = Field(
|
||||
default=True,
|
||||
description="Whether cross-boundary data flows are whitelisted and documented",
|
||||
)
|
||||
training_data_integrity_verified: bool = Field(
|
||||
default=False,
|
||||
description="Whether training data sources are validated with integrity checks",
|
||||
)
|
||||
data_retention_policy_days: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Maximum data retention period in days (None = no policy)",
|
||||
)
|
||||
database_query_audit_enabled: bool = Field(
|
||||
default=False,
|
||||
description="Whether database queries from the agent are fully audited",
|
||||
)
|
||||
object_storage_access_logged: bool = Field(
|
||||
default=False,
|
||||
description="Whether object storage (S3/Blob/GCS) access is logged",
|
||||
)
|
||||
llm_context_sanitized: bool = Field(
|
||||
default=False,
|
||||
description="Whether sensitive data is stripped from LLM context windows",
|
||||
)
|
||||
has_model_card: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent's model has a documented model card",
|
||||
)
|
||||
output_validated_for_sensitive_data: bool = Field(
|
||||
default=False,
|
||||
description="Whether agent outputs are validated and redacted for sensitive data",
|
||||
)
|
||||
supports_data_subject_rights: bool = Field(
|
||||
default=False,
|
||||
description="Whether the system supports GDPR/CCPA data subject access requests",
|
||||
)
|
||||
|
||||
|
||||
class AgentRuntimeConfig(BaseModel):
|
||||
"""Runtime & Sandbox Security configuration for an AI agent."""
|
||||
|
||||
runs_as_root: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent container/process runs as root",
|
||||
)
|
||||
privileged_container: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent runs in a privileged container",
|
||||
)
|
||||
has_seccomp_profile: bool = Field(
|
||||
default=False,
|
||||
description="Whether a seccomp profile is applied to the container",
|
||||
)
|
||||
has_apparmor_selinux: bool = Field(
|
||||
default=False,
|
||||
description="Whether AppArmor or SELinux policy is applied",
|
||||
)
|
||||
has_resource_limits: bool = Field(
|
||||
default=False,
|
||||
description="Whether CPU, memory, and disk limits are configured",
|
||||
)
|
||||
image_scanned_for_cves: bool = Field(
|
||||
default=False,
|
||||
description="Whether the container image is scanned for vulnerabilities before deployment",
|
||||
)
|
||||
has_runtime_monitoring: bool = Field(
|
||||
default=False,
|
||||
description="Whether runtime security monitoring (Falco, Sysdig, etc.) is enabled",
|
||||
)
|
||||
execution_environment_versioned: bool = Field(
|
||||
default=False,
|
||||
description="Whether the execution environment uses pinned base images and IaC",
|
||||
)
|
||||
secrets_cleared_from_memory: bool = Field(
|
||||
default=False,
|
||||
description="Whether sensitive data is cleared from memory after use",
|
||||
)
|
||||
has_execution_timeout: bool = Field(
|
||||
default=False,
|
||||
description="Whether execution time limits are enforced",
|
||||
)
|
||||
behavior_deterministic: bool = Field(
|
||||
default=True,
|
||||
description="Whether the agent produces deterministic, reproducible behaviour",
|
||||
)
|
||||
dependencies_integrity_checked: bool = Field(
|
||||
default=False,
|
||||
description="Whether runtime dependencies are verified via checksums/signatures",
|
||||
)
|
||||
uses_platform_security_controls: bool = Field(
|
||||
default=False,
|
||||
description="Whether platform-native controls (Pod Security Standards, Binary Authorization) are applied",
|
||||
)
|
||||
|
||||
|
||||
class AgentSupplyChainConfig(BaseModel):
|
||||
"""Supply Chain Security configuration for an AI agent."""
|
||||
|
||||
framework_cves_scanned: bool = Field(
|
||||
default=False,
|
||||
description="Whether agent frameworks (LangChain, etc.) are scanned for CVEs",
|
||||
)
|
||||
llm_model_provenance_verified: bool = Field(
|
||||
default=False,
|
||||
description="Whether the LLM model has verified provenance (checksum, signed source)",
|
||||
)
|
||||
plugins_security_reviewed: bool = Field(
|
||||
default=False,
|
||||
description="Whether all agent plugins/tools have been security-reviewed",
|
||||
)
|
||||
dependencies_version_pinned: bool = Field(
|
||||
default=False,
|
||||
description="Whether all dependencies use exact pinned versions with lock files",
|
||||
)
|
||||
artifacts_signed: bool = Field(
|
||||
default=False,
|
||||
description="Whether container images and artifacts are cryptographically signed",
|
||||
)
|
||||
cicd_has_security_gates: bool = Field(
|
||||
default=False,
|
||||
description="Whether the CI/CD pipeline includes secret scanning, SAST, and dependency scanning",
|
||||
)
|
||||
licenses_compliant: bool = Field(
|
||||
default=True,
|
||||
description="Whether all model and library licenses are documented and compliant",
|
||||
)
|
||||
model_update_cadence_days: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Maximum allowed days between security updates to the LLM model",
|
||||
)
|
||||
dependency_checksums_verified: bool = Field(
|
||||
default=False,
|
||||
description="Whether package checksums/signatures are verified on download",
|
||||
)
|
||||
|
||||
|
||||
class AgentObservabilityConfig(BaseModel):
|
||||
"""Observability & Monitoring configuration for an AI agent."""
|
||||
|
||||
execution_logs_complete: bool = Field(
|
||||
default=False,
|
||||
description="Whether execution logs capture actions, tools, decisions, and outputs",
|
||||
)
|
||||
anomaly_detection_enabled: bool = Field(
|
||||
default=False,
|
||||
description="Whether anomaly detection monitors for unusual agent behaviour",
|
||||
)
|
||||
prompt_injection_monitoring: bool = Field(
|
||||
default=False,
|
||||
description="Whether LLM inputs are monitored for prompt injection / jailbreak attempts",
|
||||
)
|
||||
audit_logs_immutable: bool = Field(
|
||||
default=False,
|
||||
description="Whether audit logs are immutable and integrity-protected",
|
||||
)
|
||||
metrics_exported: bool = Field(
|
||||
default=False,
|
||||
description="Whether key metrics (latency, error rate, resource usage) are exported",
|
||||
)
|
||||
security_event_alerting: bool = Field(
|
||||
default=False,
|
||||
description="Whether security events trigger alerts within 5 minutes",
|
||||
)
|
||||
distributed_tracing_enabled: bool = Field(
|
||||
default=False,
|
||||
description="Whether W3C trace context is propagated across agent service calls",
|
||||
)
|
||||
centralized_dashboard: bool = Field(
|
||||
default=False,
|
||||
description="Whether a centralised dashboard shows agent security posture",
|
||||
)
|
||||
configuration_drift_tracked: bool = Field(
|
||||
default=False,
|
||||
description="Whether configuration changes are tracked and drift from baseline detected",
|
||||
)
|
||||
performance_baseline_defined: bool = Field(
|
||||
default=False,
|
||||
description="Whether a performance baseline exists and degradation triggers alerts",
|
||||
)
|
||||
|
||||
|
||||
class AgentComplianceConfig(BaseModel):
|
||||
"""Compliance & Governance configuration for an AI agent."""
|
||||
|
||||
owasp_llm_top10_assessed: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent has been assessed against the OWASP LLM Top 10",
|
||||
)
|
||||
eu_ai_act_controls_present: bool = Field(
|
||||
default=False,
|
||||
description="Whether EU AI Act compliance controls are documented",
|
||||
)
|
||||
nist_ai_rmf_assessed: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent has been assessed against the NIST AI RMF",
|
||||
)
|
||||
access_control_policy_enforced: bool = Field(
|
||||
default=False,
|
||||
description="Whether a documented access control policy is enforced and audited",
|
||||
)
|
||||
dpia_completed: bool = Field(
|
||||
default=False,
|
||||
description="Whether a Data Privacy Impact Assessment has been completed",
|
||||
)
|
||||
regulatory_requirements_mapped: bool = Field(
|
||||
default=False,
|
||||
description="Whether applicable regulations (HIPAA, PCI-DSS, etc.) are mapped",
|
||||
)
|
||||
incident_response_plan_exists: bool = Field(
|
||||
default=False,
|
||||
description="Whether an agent-specific incident response plan exists and is tested",
|
||||
)
|
||||
third_party_vendors_assessed: bool = Field(
|
||||
default=False,
|
||||
description="Whether third-party agent vendors have been security-assessed (SOC 2, ISO 27001)",
|
||||
)
|
||||
user_consent_and_disclosure: bool = Field(
|
||||
default=False,
|
||||
description="Whether users are informed and consent to agent actions on their behalf",
|
||||
)
|
||||
|
||||
|
||||
class AgentAttackPathsConfig(BaseModel):
|
||||
"""Attack Path Analysis configuration for an AI agent."""
|
||||
|
||||
cross_cloud_escalation_possible: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent can chain identities to escalate privileges across clouds",
|
||||
)
|
||||
tool_abuse_escalation_possible: bool = Field(
|
||||
default=False,
|
||||
description="Whether agent tools can be abused to exceed the agent's declared permissions",
|
||||
)
|
||||
sensitive_data_enables_downstream_compromise: bool = Field(
|
||||
default=False,
|
||||
description="Whether data accessible to the agent contains credentials or social-engineering material",
|
||||
)
|
||||
lateral_movement_via_shared_infra: bool = Field(
|
||||
default=False,
|
||||
description="Whether the agent can access sibling agent infrastructure / shared services",
|
||||
)
|
||||
compromise_enables_full_account_takeover: bool = Field(
|
||||
default=False,
|
||||
description="Whether a compromised agent credential chain could lead to full account takeover",
|
||||
)
|
||||
llm_output_used_in_code_execution: bool = Field(
|
||||
default=False,
|
||||
description="Whether LLM output is used directly in system calls or exec() without validation",
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Top-level Agent model
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class AgentConfig(BaseModel):
|
||||
"""Full security posture declaration for a single AI agent deployment."""
|
||||
|
||||
id: str = Field(description="Unique identifier for this agent deployment")
|
||||
name: str = Field(description="Human-readable agent name")
|
||||
environment: str = Field(
|
||||
default="unknown",
|
||||
description="Deployment environment: prod | staging | dev | unknown",
|
||||
)
|
||||
cloud_provider: str = Field(
|
||||
default="unknown",
|
||||
description="Primary cloud provider: aws | azure | gcp | unknown",
|
||||
)
|
||||
region: str = Field(default="global", description="Primary deployment region")
|
||||
|
||||
identity: AgentIdentityConfig = Field(default_factory=AgentIdentityConfig)
|
||||
permissions: AgentPermissionsConfig = Field(default_factory=AgentPermissionsConfig)
|
||||
credentials: AgentCredentialsConfig = Field(default_factory=AgentCredentialsConfig)
|
||||
network: AgentNetworkConfig = Field(default_factory=AgentNetworkConfig)
|
||||
data_access: AgentDataAccessConfig = Field(default_factory=AgentDataAccessConfig)
|
||||
runtime: AgentRuntimeConfig = Field(default_factory=AgentRuntimeConfig)
|
||||
supply_chain: AgentSupplyChainConfig = Field(default_factory=AgentSupplyChainConfig)
|
||||
observability: AgentObservabilityConfig = Field(
|
||||
default_factory=AgentObservabilityConfig
|
||||
)
|
||||
compliance: AgentComplianceConfig = Field(default_factory=AgentComplianceConfig)
|
||||
attack_paths: AgentAttackPathsConfig = Field(default_factory=AgentAttackPathsConfig)
|
||||
|
||||
def dict(self, **kwargs):
|
||||
"""Return a serialisable dict (used by Check_Report)."""
|
||||
return super().model_dump(**kwargs)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Output options
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ASPMOutputOptions(ProviderOutputOptions):
|
||||
"""ASPM-specific output options."""
|
||||
|
||||
def __init__(self, arguments, bulk_checks_metadata):
|
||||
super().__init__(arguments, bulk_checks_metadata)
|
||||
if not getattr(arguments, "output_filename", None):
|
||||
self.output_filename = f"prowler-output-aspm-{output_file_timestamp}"
|
||||
else:
|
||||
self.output_filename = arguments.output_filename
|
||||
0
prowler/providers/aspm/services/__init__.py
Normal file
0
prowler/providers/aspm/services/__init__.py
Normal file
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_compromise_blast_radius_contained",
|
||||
"CheckTitle": "AI Agent Compromise Blast Radius Must Be Contained — Full Account Takeover Must Not Be Possible",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "attack_paths",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Attack Path**. Evaluates whether compromising the AI agent's credential chain could allow an attacker to achieve full cloud account takeover. This occurs when the agent's permissions — either directly or through role chaining — reach account-administrative actions such as creating IAM principals, modifying SCPs, disabling audit trails, or deleting critical infrastructure. A well-scoped agent should have a contained blast radius limited strictly to its functional domain.",
|
||||
"Risk": "An agent with an unbounded blast radius transforms every prompt injection, supply-chain attack, or runtime exploit targeting the agent into a potential full account takeover. A single compromised agent can be used to create backdoor IAM users, disable CloudTrail, exfiltrate all secrets, and destroy production resources — all under the agent's legitimate identity with no immediate IAM-level alert.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Apply a permission boundary to the agent's IAM role that prevents any account-administrative action (iam:CreateUser, iam:AttachRolePolicy, cloudtrail:StopLogging, organizations:*).\n2. Remove all attached admin or power-user managed policies and replace with a narrow functional policy.\n3. Enable AWS Service Control Policies (SCPs) or Azure Policy assignments that deny high-risk actions from agent identities.\n4. Conduct a blast-radius analysis using CIEM tooling (e.g., Wiz, Orca, Ermetic) and remediate all paths to account-level impact.\n5. Implement break-glass alerting: any attempt by the agent identity to call account-administrative APIs should page on-call immediately.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Apply permission boundaries and SCPs to hard-cap the agent's maximum achievable permissions at the account level. Remove any direct or indirect path to account-administrative actions. Validate the blast radius using CIEM tooling after every permission change.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_compromise_blast_radius_contained"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"trust-boundaries",
|
||||
"internet-exposed"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-100 attack path check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,43 @@
|
||||
"""ASPM-100: AI agent compromise blast radius must be contained — full account takeover must not be possible."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.attack_paths.attack_paths_client import (
|
||||
attack_paths_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_compromise_blast_radius_contained(Check):
|
||||
"""Check that compromising the agent cannot lead to full cloud account takeover.
|
||||
|
||||
Blast radius refers to the maximum damage achievable if an agent is fully
|
||||
compromised. When an agent's credential chain — through role chaining,
|
||||
permission boundaries being absent, or admin-level API access — allows an
|
||||
attacker to reach account-level administrative actions (e.g., creating new
|
||||
IAM users, disabling CloudTrail, deleting all resources), the blast radius
|
||||
is effectively unbounded. This check verifies that such a path does not
|
||||
exist.
|
||||
"""
|
||||
|
||||
def execute(self) -> list[CheckReportASPM]:
|
||||
"""Execute the check against all loaded agents.
|
||||
|
||||
Returns:
|
||||
A list of CheckReportASPM findings, one per agent.
|
||||
"""
|
||||
findings = []
|
||||
for agent in attack_paths_client.agents:
|
||||
report = CheckReportASPM(metadata=self.metadata(), resource=agent)
|
||||
if agent.attack_paths.compromise_enables_full_account_takeover:
|
||||
report.status = "FAIL"
|
||||
report.status_extended = (
|
||||
f"Agent {agent.name} compromise could enable full cloud account "
|
||||
"takeover — critical blast radius."
|
||||
)
|
||||
else:
|
||||
report.status = "PASS"
|
||||
report.status_extended = (
|
||||
f"Agent {agent.name} compromise blast radius is contained — full "
|
||||
"account takeover is not possible."
|
||||
)
|
||||
findings.append(report)
|
||||
return findings
|
||||
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_data_access_no_downstream_compromise",
|
||||
"CheckTitle": "AI Agent Must Not Be Able to Access Data That Enables Downstream System or Human Compromise",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "attack_paths",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Attack Path**. Verifies that data stores accessible to the AI agent do not contain credentials, API keys, internal tokens, or social-engineering material (employee PII, org charts, internal process documentation). When an agent can read such material, an adversary who manipulates the agent's reasoning — via prompt injection or a poisoned knowledge base — can exfiltrate high-value secrets and use them to compromise downstream systems or people.",
|
||||
"Risk": "An agent with access to credential-rich or socially-exploitable data becomes a one-stop reconnaissance tool. An attacker who achieves even read-level influence over the agent can harvest database passwords, internal API keys, employee contact details, and internal playbooks — enabling cascading compromise far beyond the agent's own cloud permissions.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Apply DLP (Data Loss Prevention) scanning to all data stores the agent reads to detect and block credentials and PII.\n2. Restrict the agent's read permissions to only the specific data objects it needs — avoid granting access to entire buckets, databases, or document libraries.\n3. Redact or tokenize sensitive fields (passwords, API keys, SSNs) before they are included in agent context windows or RAG retrievals.\n4. Implement output filtering to prevent the agent from returning raw credential strings or PII in its responses.\n5. Regularly audit the agent's accessible data scope using CSPM and CIEM tooling.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Scope the agent's data access to the minimum required objects and apply DLP controls to detect and block sensitive material from entering or leaving the agent's context. Redact credentials and PII at the data layer before they reach the agent.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_data_access_no_downstream_compromise"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"trust-boundaries",
|
||||
"internet-exposed"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-098 attack path check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,42 @@
|
||||
"""ASPM-098: AI agent must not be able to access data that enables downstream compromise."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.attack_paths.attack_paths_client import (
|
||||
attack_paths_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_data_access_no_downstream_compromise(Check):
    """Verify that data reachable by an AI agent cannot fuel downstream compromise.

    Data stores readable by an agent may hold credentials (connection
    strings, API keys, internal tokens) or social-engineering material
    (employee PII, org charts, internal process documentation). When they
    do, anyone able to steer the agent's reasoning — prompt injection or a
    poisoned knowledge base — can harvest that material and attack systems
    or people entirely outside the agent's declared scope.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Evaluate every agent known to the attack-paths client.

        Returns:
            A list of CheckReportASPM findings, one per agent.
        """
        reports: list[CheckReportASPM] = []
        for agent in attack_paths_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # Flag set by the attack-paths service when agent-readable data
            # contains credentials or social-engineering material.
            if agent.attack_paths.sensitive_data_enables_downstream_compromise:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} can access data containing credentials or "
                    "social-engineering material enabling downstream compromise."
                )
            else:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} accessible data does not contain credentials "
                    "or social-engineering material."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_llm_output_not_executed",
|
||||
"CheckTitle": "AI Agent Must Not Execute LLM Output Directly in System Calls or eval()",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "attack_paths",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Attack Path**. Detects whether the AI agent passes raw LLM-generated text directly to code-execution primitives such as eval(), exec(), subprocess.run(shell=True), os.system(), or equivalent functions without sanitisation and structural allow-listing. When LLM output is executed verbatim, a successful prompt injection — delivered via a poisoned document, a malicious tool response, or an adversarial user prompt — becomes a Remote Code Execution (RCE) vulnerability running under the agent's cloud identity.",
|
||||
"Risk": "Direct execution of LLM output converts every successful prompt injection into full Remote Code Execution on the agent host. An attacker who controls any input that reaches the LLM — retrieved document, user message, tool response — can instruct the model to emit a payload that the agent will execute verbatim. The resulting RCE runs under the agent's cloud identity, granting the attacker access to all cloud resources, secrets, and downstream systems the agent can reach.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Never pass raw LLM output to eval(), exec(), subprocess, or shell commands — treat all LLM output as untrusted user input.\n2. Use structured output formats (JSON with a strict schema) instead of free-form text when the agent must drive code execution.\n3. Validate and allow-list LLM-generated structured payloads against a predefined schema before any execution step.\n4. Route all code execution through a sandboxed execution environment (gVisor, Firecracker, WebAssembly) with no network access and scoped file system access.\n5. Implement prompt injection detection on all LLM inputs (user messages, retrieved documents, tool responses) and refuse execution when injection patterns are detected.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Treat LLM output as untrusted input. Never pass it directly to code-execution primitives. Require structured, schema-validated payloads for any execution step, run code in isolated sandboxes, and implement prompt injection detection to prevent adversarial payloads from reaching the execution layer.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_llm_output_not_executed"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"trust-boundaries",
|
||||
"internet-exposed"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-101 attack path check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,42 @@
|
||||
"""ASPM-101: AI agent must not execute LLM output directly in system calls or eval()."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.attack_paths.attack_paths_client import (
|
||||
attack_paths_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_llm_output_not_executed(Check):
    """Verify that raw LLM output never reaches code-execution primitives.

    Feeding unsanitised LLM-generated text into eval(), exec(),
    subprocess.run(shell=True), os.system(), or an equivalent primitive
    turns every successful prompt injection — a poisoned document, a
    malicious tool response, an adversarial user prompt — into Remote Code
    Execution running under the agent's cloud identity.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Evaluate every agent known to the attack-paths client.

        Returns:
            A list of CheckReportASPM findings, one per agent.
        """
        reports: list[CheckReportASPM] = []
        for agent in attack_paths_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # Flag set by the attack-paths service when LLM output is wired
            # into an execution primitive without sanitisation.
            if agent.attack_paths.llm_output_used_in_code_execution:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} uses LLM output directly in code execution — "
                    "prompt injection can achieve RCE."
                )
            else:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} does not execute LLM output directly in "
                    "system calls or eval()."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_no_cross_cloud_escalation",
|
||||
"CheckTitle": "AI Agent Must Not Have a Cross-Cloud Identity Chain Enabling Privilege Escalation",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "attack_paths",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Attack Path**. Detects whether an AI agent's identity in one cloud provider can be used to assume a higher-privileged role in another cloud provider, creating a cross-cloud privilege escalation path. An attacker who compromises the agent can silently pivot between clouds, bypassing the least-privilege controls of each environment in isolation.",
|
||||
"Risk": "A cross-cloud escalation path allows an attacker with initial access to the agent's lower-privileged identity in one cloud to obtain administrative or broad permissions in a second cloud. This renders the security boundaries of both clouds ineffective and can result in full multi-cloud environment compromise from a single initial foothold.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Audit all trust relationships and federation configurations between cloud identities used by this agent.\n2. Remove or tighten cross-cloud trust policies (e.g., AWS IAM OIDC provider conditions, GCP Workload Identity Pool conditions).\n3. Enforce the principle of least privilege independently in every cloud; do not allow a low-privilege identity in Cloud A to assume a high-privilege role in Cloud B.\n4. Implement explicit deny conditions on cross-cloud AssumeRole / impersonation calls that exceed the agent's declared permission scope.\n5. Alert on any new cross-cloud federation configuration changes via CSPM or CIEM tooling.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Remove cross-cloud identity trust relationships that allow the agent to escalate privileges beyond its declared scope. Apply equivalent least-privilege constraints to every cloud the agent interacts with, and monitor federation configuration changes continuously.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_no_cross_cloud_escalation"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"trust-boundaries",
|
||||
"internet-exposed"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-096 attack path check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
"""ASPM-096: AI agent must not have a cross-cloud identity chain enabling privilege escalation."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.attack_paths.attack_paths_client import (
|
||||
attack_paths_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_no_cross_cloud_escalation(Check):
    """Verify that an agent cannot chain identities across clouds to escalate.

    Cross-cloud identity chaining happens when the agent's identity in one
    provider can assume a more privileged role in another (for example, an
    AWS IAM role trusted by a GCP service account with broader
    permissions). Such a chain bypasses the least-privilege controls of
    each cloud taken in isolation.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Evaluate every agent known to the attack-paths client.

        Returns:
            A list of CheckReportASPM findings, one per agent.
        """
        reports: list[CheckReportASPM] = []
        for agent in attack_paths_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # Guard on the safe case first; escalation paths are the FAIL branch.
            if not agent.attack_paths.cross_cloud_escalation_possible:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} has no cross-cloud identity chain enabling "
                    "lateral privilege escalation."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} can chain identities across cloud providers "
                    "to escalate privileges."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_no_lateral_movement_via_shared_infra",
|
||||
"CheckTitle": "AI Agent Must Not Be Able to Access Shared Infrastructure Used by Sibling Agents",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "attack_paths",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Attack Path**. Detects whether an AI agent can access infrastructure components — message queues, shared databases, configuration services, secret stores, or file systems — that are also used by sibling agents in the same deployment. Shared infrastructure creates a lateral movement path: compromise of one agent becomes a stepping stone to attacking all agents that share those resources.",
|
||||
"Risk": "In multi-agent systems, shared infrastructure is the primary lateral movement surface. An attacker who compromises a lower-privilege agent can read messages, inject tasks, or overwrite configuration intended for a higher-privilege sibling agent, effectively taking over the entire agent fleet from a single initial breach point.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Assign each agent its own isolated message queue, database schema, and secret store — avoid any shared-resource pattern.\n2. Enforce network-level isolation between agent workloads using security groups, VPC subnet segregation, or Kubernetes NetworkPolicies.\n3. Use separate IAM identities per agent so that an identity compromise cannot access another agent's resources.\n4. Implement cross-agent communication via authenticated and signed API calls rather than shared data stores.\n5. Audit IAM policies and resource policies (SQS, S3, DynamoDB) to ensure no agent can access resources owned by sibling agents.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Eliminate shared infrastructure between agents. Provide each agent with dedicated, isolated resources and enforce network and IAM boundaries that prevent any cross-agent resource access. Use authenticated APIs for legitimate inter-agent communication.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_no_lateral_movement_via_shared_infra"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"trust-boundaries",
|
||||
"internet-exposed"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-099 attack path check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
"""ASPM-099: AI agent must not be able to access shared infrastructure used by sibling agents."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.attack_paths.attack_paths_client import (
|
||||
attack_paths_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_no_lateral_movement_via_shared_infra(Check):
    """Verify that an agent cannot reach infrastructure shared with siblings.

    Shared components — message queues, databases, secret stores, file
    systems, configuration services — let the compromise of one agent
    pivot into attacks on adjacent agents. The risk is highest in
    multi-agent orchestration systems where agent-to-agent trust is
    implicit.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Evaluate every agent known to the attack-paths client.

        Returns:
            A list of CheckReportASPM findings, one per agent.
        """
        reports: list[CheckReportASPM] = []
        for agent in attack_paths_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # Flag set by the attack-paths service when this agent shares
            # infrastructure with sibling agents.
            if agent.attack_paths.lateral_movement_via_shared_infra:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} can access shared infrastructure used by "
                    "sibling agents — lateral movement risk."
                )
            else:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} cannot access sibling agent infrastructure "
                    "or shared credential stores."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_tools_cannot_escalate",
|
||||
"CheckTitle": "AI Agent Tools Must Not Be Abusable to Escalate Beyond Declared Permissions",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "attack_paths",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Attack Path**. Verifies that the tools granted to an AI agent cannot be chained together or individually abused to perform actions beyond the agent's declared IAM/permission scope. Tool abuse includes prompt injection that redirects tool calls, SSRF through HTTP-fetching tools, insecure deserialization via file-parsing tools, and shell-escape vulnerabilities in code-execution sandboxes.",
|
||||
"Risk": "When agent tools can be abused, an attacker who controls any part of the agent's input (prompt, retrieved document, API response) can coerce the agent into executing arbitrary commands, exfiltrating data, or invoking cloud APIs far beyond the agent's intended scope — all under the agent's legitimate identity and without triggering IAM-level alerts.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Enumerate every tool available to the agent and perform a tool-specific threat model (SSRF, shell injection, insecure deserialization).\n2. Apply input validation and allowlisting to all tool parameters before execution.\n3. Run code-execution tools in strict sandboxes (gVisor, Firecracker, seccomp profiles) with no network access unless explicitly required.\n4. Implement tool call signing and validation so the orchestration layer can detect tampered tool invocations.\n5. Log all tool invocations with full parameter payloads and alert on anomalous combinations.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Perform a threat model for every tool granted to the agent. Apply strict input validation, sandboxing, and allowlisted tool parameters. Log and alert on all tool invocations to detect abuse chains at runtime.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_tools_cannot_escalate"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"trust-boundaries",
|
||||
"internet-exposed"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-097 attack path check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,42 @@
|
||||
"""ASPM-097: AI agent tools must not be abusable to escalate beyond the agent's declared permissions."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.attack_paths.attack_paths_client import (
|
||||
attack_paths_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_tools_cannot_escalate(Check):
    """Verify that the agent's tools cannot be abused beyond its declared scope.

    Agents hold a set of tools (code-execution sandboxes, shell utilities,
    file-system accessors, API wrappers). If those tools can be composed
    or abused — through prompt injection, insecure deserialization, or
    SSRF — an attacker can act far beyond what the agent's IAM policy
    technically allows. A PASS means no such tool-abuse escalation path
    has been identified.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Evaluate every agent known to the attack-paths client.

        Returns:
            A list of CheckReportASPM findings, one per agent.
        """
        reports: list[CheckReportASPM] = []
        for agent in attack_paths_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # Flag set by the attack-paths service when a tool-abuse
            # escalation chain exists for this agent.
            if agent.attack_paths.tool_abuse_escalation_possible:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} tools can be chained or abused to escalate "
                    "beyond declared permissions."
                )
            else:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} tools cannot be abused to exceed the agent's "
                    "declared permissions."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,7 @@
|
||||
"""ASPM Attack Paths service client singleton."""
|
||||
|
||||
from prowler.providers.aspm.services.attack_paths.attack_paths_service import (
|
||||
AttackPaths,
|
||||
)
|
||||
|
||||
attack_paths_client = AttackPaths
|
||||
@@ -0,0 +1,20 @@
|
||||
"""ASPM Attack Paths service."""
|
||||
|
||||
from prowler.providers.aspm.aspm_provider import AspmProvider
|
||||
from prowler.providers.aspm.lib.service.service import AspmService
|
||||
|
||||
|
||||
class AttackPaths(AspmService):
    """ASPM service backing the AI agent attack path checks.

    Exposes the agent inventory inherited from AspmService; consumed by
    the attack path checks ASPM-096 through ASPM-101.
    """

    def __init__(self, provider: AspmProvider) -> None:
        """Build the service from the active ASPM provider.

        Args:
            provider: The active AspmProvider instance.
        """
        # All state (including the agent list) is loaded by the base class.
        super().__init__(provider)
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_access_control_policy_enforced",
|
||||
"CheckTitle": "AI agent must have a documented access control policy that is enforced and audited",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that each AI agent operates under a documented access control policy defining who or what may interact with the agent, what actions are permitted, under which conditions, and that the policy is technically enforced and access events are audited.",
|
||||
"Risk": "An absent or unenforced access control policy means there is no defined boundary for agent interactions. Unauthorised principals may invoke agent capabilities, access sensitive data processed by the agent, or trigger actions with financial or operational consequences, all without detection.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Define a formal access control policy for the agent specifying permitted callers, allowed actions, and applicable conditions.\n2. Implement the policy using IAM roles, API gateway authorisers, or equivalent mechanisms.\n3. Enable audit logging for all access attempts (successful and denied) to an immutable log store.\n4. Review access logs regularly for anomalous patterns.\n5. Review and update the policy at least annually or on personnel/system changes.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Document, enforce, and audit an access control policy for each AI agent. Ensure the policy is technically enforced at the API or IAM layer and that access events are captured in an immutable audit log.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_access_control_policy_enforced"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-090 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
"""ASPM-090: AI agent must have a documented access control policy that is enforced and audited."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_access_control_policy_enforced(Check):
    """Verify that every agent runs under an enforced, audited access control policy.

    An access control policy defines who or what may interact with the
    agent, which actions are permitted, and under which conditions.
    Without technical enforcement and an audit trail, unauthorised use of
    agent capabilities — or of the data the agent processes — can go
    undetected.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Evaluate every agent known to the compliance client.

        Returns:
            A list of CheckReportASPM findings, one per agent.
        """
        reports: list[CheckReportASPM] = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # Guard on the compliant case first; a missing policy is the FAIL branch.
            if agent.compliance.access_control_policy_enforced:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} has a documented access control policy that "
                    "is enforced and audited."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} lacks an enforced access control policy — "
                    "governance controls are absent."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_dpia_completed",
|
||||
"CheckTitle": "AI agent processing personal data must have a completed Data Privacy Impact Assessment",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that each AI agent that processes personal data has a completed and documented Data Privacy Impact Assessment (DPIA), identifying privacy risks, documenting mitigations, and demonstrating compliance with GDPR Article 35, CCPA, and equivalent privacy regulations.",
|
||||
"Risk": "Processing personal data without a DPIA exposes the organisation to GDPR enforcement action including fines of up to 4% of global annual turnover (or EUR 20M, whichever is higher). AI agents often process sensitive personal data as part of their normal operation, making DPIA completion a critical compliance obligation.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Determine whether the agent's processing activities meet the GDPR Article 35 threshold for mandatory DPIA (large-scale processing, systematic monitoring, sensitive categories).\n2. Conduct a DPIA using a structured methodology (e.g. CNIL PIA tool, ICO DPIA template).\n3. Document: processing purposes, necessity and proportionality, risks to data subjects, and mitigations.\n4. Consult with your Data Protection Officer (DPO) and, if required, the supervisory authority.\n5. Review and update the DPIA whenever the processing nature or risks change significantly.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Complete and document a DPIA for each AI agent that processes personal data before deployment. Involve your Data Protection Officer, document all identified risks and mitigations, and refresh the DPIA on significant changes.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_dpia_completed"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-091 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,38 @@
|
||||
"""ASPM-091: AI agent must have a completed Data Privacy Impact Assessment (DPIA)."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_dpia_completed(Check):
    """Verify that each agent has a completed Data Privacy Impact Assessment.

    GDPR Article 35 makes a DPIA mandatory for high-risk processing, and
    CCPA and other privacy regimes strongly recommend one. The DPIA
    identifies privacy risks, records mitigations, and demonstrates
    accountability to supervisory authorities; skipping it exposes the
    organisation to significant fines and reputational damage.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Evaluate every agent known to the compliance client.

        Returns:
            A list of CheckReportASPM findings, one per agent.
        """
        reports: list[CheckReportASPM] = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # Guard on the compliant case first; a missing DPIA is the FAIL branch.
            if agent.compliance.dpia_completed:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} has a completed Data Privacy Impact Assessment."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} processes personal data without a DPIA — "
                    "GDPR/CCPA compliance risk."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_eu_ai_act_controls",
|
||||
"CheckTitle": "AI agent deployed in the EU must have documented EU AI Act compliance controls",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that each AI agent has documented compliance controls aligned with the EU AI Act, including transparency obligations, human oversight mechanisms, risk management systems, and conformity assessments where required for high-risk AI systems.",
|
||||
"Risk": "The EU AI Act imposes binding obligations on providers and deployers of AI systems in the EU market. Non-compliance with requirements for high-risk AI systems can result in fines of up to 3% of global annual turnover (or EUR 15M, whichever is higher), enforcement action, and mandatory market withdrawal.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Classify the AI agent under the EU AI Act risk tiers (unacceptable, high, limited, minimal).\n2. For high-risk systems, implement conformity assessment procedures per Annex VI or VII.\n3. Document transparency obligations: inform users they are interacting with an AI system.\n4. Implement human oversight mechanisms allowing intervention and override.\n5. Establish a risk management system covering the entire AI lifecycle.\n6. Maintain technical documentation as required by Article 11 and Annex IV.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Classify each AI agent under the EU AI Act, implement required controls for the applicable risk tier, and maintain documentation demonstrating conformity. Engage a qualified legal and technical team to perform a gap analysis.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_eu_ai_act_controls"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-088 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-088: AI agent must have EU AI Act compliance controls documented."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_eu_ai_act_controls(Check):
    """Verify that every AI agent documents EU AI Act compliance controls.

    The EU AI Act places binding obligations on AI systems offered in the
    European Union — transparency, human oversight, and risk management
    among them for high-risk systems. An agent without documented controls
    exposes the organisation to regulatory penalties and enforcement action.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every agent known to the compliance service.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # Flag provided by the compliance service layer: True when the
            # agent has EU AI Act controls on record.
            if agent.compliance.eu_ai_act_controls_present:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} has EU AI Act compliance controls documented."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} lacks EU AI Act compliance controls — "
                    f"regulatory risk for EU-deployed agents."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_incident_response_plan_exists",
|
||||
"CheckTitle": "AI agent must have a tested, agent-specific incident response plan",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that each AI agent has a documented and tested incident response plan covering agent-specific failure modes such as prompt injection attacks, model misbehaviour, autonomous action errors, and credential compromise, including containment, eradication, and recovery procedures.",
|
||||
"Risk": "Generic IT incident response plans rarely cover AI-specific failure modes. Without an agent-specific plan, teams facing a prompt injection attack or autonomous action incident have no playbook to follow, leading to delayed containment, extended impact, and potentially irreversible consequences from autonomous agent actions.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Identify AI-specific incident scenarios: prompt injection, model hallucination causing harm, autonomous action abuse, data exfiltration via LLM output, credential compromise.\n2. For each scenario, document detection signals, containment steps (including agent shutdown procedures), eradication, and recovery.\n3. Define roles and responsibilities, escalation paths, and communication templates.\n4. Integrate the plan with the organisation's overarching incident response procedure.\n5. Conduct tabletop exercises at least annually to validate the plan.\n6. Update the plan after every significant incident or architectural change.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Develop and test an agent-specific incident response plan covering AI failure modes. Conduct annual tabletop exercises and update the plan after incidents or significant architectural changes.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_incident_response_plan_exists"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-093 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
"""ASPM-093: AI agent must have a tested, agent-specific incident response plan."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_incident_response_plan_exists(Check):
    """Verify that every AI agent has a tested, agent-specific incident response plan.

    Generic IT incident response playbooks rarely cover AI failure modes such
    as prompt injection, model misbehaviour, or erroneous autonomous actions.
    A dedicated plan gives teams concrete containment, eradication, and
    recovery steps for agent-related incidents.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every agent known to the compliance service.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # True when the compliance service records an agent-specific
            # incident response plan for this agent.
            if agent.compliance.incident_response_plan_exists:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} has a tested, agent-specific incident response plan."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} has no incident response plan — the organisation "
                    f"is unprepared for agent-related incidents."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_nist_ai_rmf_assessed",
|
||||
"CheckTitle": "AI agent must be assessed against the NIST AI Risk Management Framework",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that each AI agent has been assessed against the NIST AI Risk Management Framework (AI RMF 1.0), covering the GOVERN, MAP, MEASURE, and MANAGE functions, to ensure structured risk identification, measurement, and mitigation across the AI lifecycle.",
|
||||
"Risk": "Without an NIST AI RMF assessment, organisations lack a structured approach to identifying and managing AI-specific risks including bias, robustness failures, and security vulnerabilities. This can lead to undiscovered risks becoming operational incidents, particularly for agents in sensitive or regulated environments.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Obtain the NIST AI RMF 1.0 publication from nist.gov/artificial-intelligence.\n2. Perform a structured assessment covering GOVERN (policies), MAP (context and risk identification), MEASURE (risk analysis), and MANAGE (risk treatment).\n3. Document outcomes and assign risk owners for each identified AI risk.\n4. Integrate AI RMF assessment into the agent's development and deployment lifecycle.\n5. Schedule periodic re-assessment (at least annually or on major model changes).",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Conduct and document an NIST AI RMF assessment for each AI agent, covering all four core functions. Use the assessment outputs to drive risk treatment decisions and governance improvements.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_nist_ai_rmf_assessed"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-089 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-089: AI agent must be assessed against the NIST AI Risk Management Framework."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_nist_ai_rmf_assessed(Check):
    """Verify that every AI agent has been assessed against the NIST AI RMF.

    The NIST AI Risk Management Framework (AI RMF 1.0) gives organisations a
    structured way to identify, measure, and manage AI risk across the whole
    lifecycle. A completed assessment demonstrates that the GOVERN, MAP,
    MEASURE, and MANAGE functions are in place for the agent.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every agent known to the compliance service.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # True when a NIST AI RMF assessment is recorded for this agent.
            if agent.compliance.nist_ai_rmf_assessed:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} has been assessed against the NIST AI Risk "
                    f"Management Framework."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} has not been assessed against the NIST AI RMF."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_owasp_llm_top10_assessed",
|
||||
"CheckTitle": "AI agent must be assessed against the OWASP LLM Top 10 with documented mitigations",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that each AI agent has been formally assessed against the OWASP Top 10 for Large Language Model Applications and that mitigations are documented for all applicable risks, including prompt injection, insecure output handling, training data poisoning, and model denial of service.",
|
||||
"Risk": "Without an OWASP LLM Top 10 assessment, common and well-understood attack vectors against AI agents remain unaddressed. Prompt injection and insecure output handling are regularly exploited in the wild and can lead to data exfiltration, privilege escalation, or complete agent compromise.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Download the OWASP Top 10 for LLM Applications from owasp.org.\n2. Conduct a structured assessment of each LLM-powered agent against all 10 categories.\n3. Document findings and applicable risks in the agent's security design document.\n4. Implement mitigations (input validation, output encoding, rate limiting, etc.) for each applicable risk.\n5. Schedule re-assessment annually or on significant model or architecture changes.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Perform and document an OWASP LLM Top 10 assessment for each AI agent, including risk ratings and implemented mitigations. Integrate assessment checkpoints into the AI agent development lifecycle.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_owasp_llm_top10_assessed"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-087 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
"""ASPM-087: AI agent must be assessed against the OWASP LLM Top 10."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_owasp_llm_top10_assessed(Check):
    """Verify that every AI agent has been assessed against the OWASP LLM Top 10.

    The OWASP Top 10 for LLM Applications catalogues the most critical risks
    for LLM-backed systems — prompt injection, insecure output handling,
    training data poisoning, and others. An assessment with documented
    mitigations ensures these well-known vectors are actively addressed.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every agent known to the compliance service.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # True when an OWASP LLM Top 10 assessment is recorded.
            if agent.compliance.owasp_llm_top10_assessed:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} has been assessed against the OWASP LLM Top 10 "
                    f"with documented mitigations."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} has not been assessed against the OWASP LLM Top 10."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_regulatory_requirements_mapped",
|
||||
"CheckTitle": "AI agent must have all applicable regulatory requirements mapped to controls",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that each AI agent has a documented mapping of all applicable regulatory requirements (e.g. HIPAA for healthcare data, PCI-DSS for payment processing, SOX for financial controls) to implemented technical and organisational controls, with gap analysis and remediation tracking.",
|
||||
"Risk": "Without regulatory requirement mapping, compliance gaps remain invisible until discovered during audits or incidents. For AI agents operating in regulated industries, undetected gaps can lead to substantial fines, mandatory remediation programmes, and in some cases criminal liability for responsible officers.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Identify all industries and jurisdictions in which the agent operates.\n2. Enumerate applicable regulations (HIPAA, PCI-DSS, SOX, GDPR, CCPA, FedRAMP, etc.).\n3. Create a regulatory control matrix mapping each requirement to implemented controls.\n4. Conduct a gap analysis to identify unmet requirements.\n5. Implement a remediation plan with owners and target dates for all gaps.\n6. Review the mapping at least annually and on regulatory changes.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Maintain a living regulatory control matrix for each AI agent, mapping all applicable regulatory requirements to implemented controls. Conduct regular gap analyses and track remediation to closure.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_regulatory_requirements_mapped"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-092 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
"""ASPM-092: AI agent must have all applicable regulatory requirements mapped."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_regulatory_requirements_mapped(Check):
    """Verify that every AI agent maps applicable regulatory requirements to controls.

    Agents operating in regulated industries must enumerate the regulations
    that apply to them (e.g. HIPAA for healthcare data, PCI-DSS for payment
    processing, SOX for financial reporting) and map each requirement to an
    implemented control. Without that mapping, compliance gaps stay hidden
    until an audit or incident surfaces them.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every agent known to the compliance service.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # True when a regulatory control mapping is recorded for the agent.
            if agent.compliance.regulatory_requirements_mapped:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} has all applicable regulatory requirements "
                    f"(HIPAA, PCI-DSS, etc.) mapped."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} lacks regulatory requirement mapping — "
                    f"compliance gaps may exist."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_third_party_vendors_assessed",
|
||||
"CheckTitle": "Third-party vendors used by the AI agent must be security-assessed (SOC 2, ISO 27001)",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that all third-party vendors (LLM API providers, tool integrations, data processors) used by each AI agent have been formally security-assessed, with valid SOC 2 Type II reports, ISO 27001 certificates, or equivalent assurance documentation on file.",
|
||||
"Risk": "Third-party vendors with inadequate security controls can compromise the confidentiality, integrity, and availability of data processed by the agent. Supply-chain attacks against AI service providers are increasingly common. Without vendor assessments, organisations cannot make informed risk decisions about the third-party dependencies their agents rely on.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Maintain an inventory of all third-party vendors used by each AI agent.\n2. For each vendor, obtain and review current SOC 2 Type II reports, ISO 27001 certificates, or equivalent assurance documentation.\n3. Assess vendor security posture against your organisation's third-party risk criteria.\n4. Document assessment results, findings, and accepted residual risks.\n5. Establish contractual security requirements (DPA, security addendum) with each vendor.\n6. Schedule annual re-assessment and monitor for vendor security incidents.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Maintain an assessed vendor inventory for each AI agent. Obtain SOC 2 Type II or ISO 27001 assurance for all significant third-party dependencies and conduct annual re-assessments.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_third_party_vendors_assessed"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-094 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
"""ASPM-094: Third-party vendors used by AI agent must be security-assessed."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_third_party_vendors_assessed(Check):
    """Verify that third-party vendors used by each AI agent are security-assessed.

    Agents typically depend on external LLM APIs, tool providers, and data
    processors, and each dependency is supply-chain risk. Assurance evidence
    such as a SOC 2 Type II report or ISO 27001 certification shows a vendor
    maintains adequate controls, reducing the chance that a vendor compromise
    propagates to the agent.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every agent known to the compliance service.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # True when every third-party vendor used by the agent has a
            # recorded security assessment.
            if agent.compliance.third_party_vendors_assessed:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} third-party vendors have been security-assessed "
                    f"(SOC 2, ISO 27001)."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} uses third-party services without security "
                    f"assessment — supply chain risk."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_user_consent_and_disclosure",
|
||||
"CheckTitle": "AI agent must disclose its actions to users and obtain appropriate consent",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "compliance",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Compliance**. Verifies that each AI agent discloses to users that they are interacting with or delegating to an AI system, clearly communicates what actions the agent will take on their behalf, and obtains appropriate and documented consent prior to performing consequential actions.",
|
||||
"Risk": "Agents acting on behalf of users without disclosure or consent violate privacy regulations (GDPR, CCPA), sector-specific rules (EU AI Act transparency requirements), and ethical AI principles. Users who are unaware of agent actions cannot exercise meaningful oversight, and undisclosed AI interactions can result in regulatory enforcement and significant reputational harm.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Implement a disclosure mechanism that clearly identifies the agent as an AI system to users before or at the start of an interaction.\n2. For consequential actions (sending emails, making purchases, modifying data), present the proposed action to the user and obtain explicit confirmation before execution.\n3. Log consent events with timestamp, user identifier, and action scope.\n4. Provide users with a mechanism to review and revoke previously granted agent permissions.\n5. Review disclosure language and consent flows with legal counsel to ensure regulatory compliance.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Implement clear AI disclosure and informed consent mechanisms for each agent. Ensure users can review, approve, and revoke agent actions, and that consent events are logged for audit purposes.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_user_consent_and_disclosure"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"forensics-ready"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-095 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
"""ASPM-095: AI agent must disclose its actions to users and obtain appropriate consent."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.compliance.compliance_client import (
|
||||
compliance_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_user_consent_and_disclosure(Check):
    """Verify that every AI agent discloses its actions and obtains user consent.

    An agent acting autonomously on a user's behalf must tell that user what
    it will do and obtain meaningful consent where required. Undisclosed AI
    interaction violates privacy regulation (GDPR, CCPA), sector-specific
    rules, and ethics guidelines, and undermines trust in the system.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every agent known to the compliance service.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports = []
        for agent in compliance_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # True when disclosure and consent mechanisms are recorded.
            if agent.compliance.user_consent_and_disclosure:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} discloses its actions to users and obtains "
                    f"appropriate consent."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} acts on behalf of users without disclosure or "
                    f"consent — ethics and regulatory risk."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,5 @@
|
||||
"""ASPM Compliance service client singleton."""
|
||||
|
||||
from prowler.providers.aspm.services.compliance.compliance_service import Compliance
|
||||
|
||||
compliance_client = Compliance
|
||||
@@ -0,0 +1,20 @@
|
||||
"""ASPM Compliance service."""
|
||||
|
||||
from prowler.providers.aspm.aspm_provider import AspmProvider
|
||||
from prowler.providers.aspm.lib.service.service import AspmService
|
||||
|
||||
|
||||
class Compliance(AspmService):
    """Service for AI agent compliance and governance assessment.

    Inherits the agent list from AspmService and is used by all ASPM
    compliance checks (ASPM-087 through ASPM-095).

    The original ``__init__`` override only forwarded its single ``provider``
    argument to ``super().__init__``; it added no behaviour, so the parent
    constructor is used directly.
    """
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_credential_access_audit_trail",
|
||||
"CheckTitle": "AI agent credential access must be logged and monitored",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that the AI agent has an audit trail for credential access operations (e.g. GetSecretValue, DescribeSecret) that is logged and monitored for anomalous behaviour.",
|
||||
"Risk": "Without an audit trail for credential retrieval, it is impossible to detect if an attacker is exfiltrating secrets or if compromised credentials are being accessed from unexpected locations. This also prevents forensic reconstruction of incidents.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Enable CloudTrail (AWS), Cloud Audit Logs (GCP), or Azure Monitor for your secrets manager.\n2. Configure alerts for GetSecretValue calls from unexpected principals or IP ranges.\n3. Use AWS CloudWatch Insights or equivalent to monitor for unusual credential access patterns.\n4. Integrate secret access events into your SIEM for correlation with other security events.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Enable audit logging for all secrets manager operations and configure alerts for anomalous credential access patterns. Integrate secret access events with your SIEM.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_credential_access_audit_trail"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-034 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-034: AI agent credential access must be audited."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_credential_access_audit_trail(Check):
    """Verify that every AI agent logs and monitors credential access events.

    Without an audit trail for secret-retrieval operations such as
    GetSecretValue, unauthorised credential access cannot be detected and the
    sequence of events after an incident cannot be reconstructed.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every agent known to the credentials service.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports = []
        for agent in credentials_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=agent)
            # True when credential access events are logged and monitored.
            if agent.credentials.credential_access_audit_trail:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {agent.name} credential access (GetSecretValue, etc.) "
                    "is logged and monitored."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {agent.name} has no audit trail for credential access "
                    "— cannot detect unauthorised retrieval."
                )
            reports.append(finding)
        return reports
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_credential_rotation_enforced",
|
||||
"CheckTitle": "AI agent credentials must be rotated at least every 90 days",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that a credential rotation policy is configured for the AI agent and that credentials rotate at most every 90 days.",
|
||||
"Risk": "Long-lived credentials accumulate over time and increase the blast radius of a compromise. Without mandatory rotation, a credential that is silently exfiltrated may remain valid indefinitely, enabling persistent attacker access.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Enable automatic rotation in your secrets manager for all agent credentials.\n2. Set the rotation schedule to 90 days or fewer.\n3. Test the rotation Lambda/function to confirm the agent continues to function after rotation.\n4. Monitor rotation events and alert on failures.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Configure automatic credential rotation in your secrets manager with a maximum interval of 90 days. Prefer shorter rotation intervals (30 days) for high-sensitivity credentials.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_credential_rotation_enforced"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-030 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,46 @@
|
||||
"""ASPM-030: AI agent credentials must rotate at most every 90 days."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
# ASPM-030 threshold: agent credentials must rotate at least this often (days).
MAX_ROTATION_DAYS = 90
|
||||
|
||||
|
||||
class aspm_agent_credential_rotation_enforced(Check):
    """Verify that every AI agent rotates its credentials within 90 days.

    A missing rotation policy, or a rotation interval longer than
    ``MAX_ROTATION_DAYS``, yields a FAIL finding; an interval at or below
    the threshold passes. Regular rotation bounds the lifetime of any
    silently exfiltrated credential.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            interval = resource.credentials.rotation_interval_days
            if interval is not None and interval <= MAX_ROTATION_DAYS:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {resource.name} credentials rotate every {interval} days."
                )
            elif interval is None:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {resource.name} has no credential rotation policy configured."
                )
            else:
                # Policy exists but the interval is longer than allowed.
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {resource.name} rotates credentials every {interval} days, "
                    f"which exceeds the maximum allowed {MAX_ROTATION_DAYS} days."
                )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_credentials_minimal_scope",
|
||||
"CheckTitle": "AI agent credentials must be scoped to minimum required permissions",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that the credentials used by the AI agent are scoped to the minimum permissions required for the agent to perform its declared functions.",
|
||||
"Risk": "Over-privileged credentials allow an attacker who compromises the agent to perform actions far beyond what the agent legitimately needs. This violates the principle of least privilege and can lead to data exfiltration, resource destruction, or lateral movement.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Use IAM Access Analyser or equivalent to identify unused permissions granted to the agent.\n2. Remove all permissions not required for current agent functions.\n3. Apply resource-level conditions to restrict access to specific ARNs or resource tags.\n4. Review agent permissions quarterly or after any functionality change.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Apply least-privilege principles to all agent credentials. Use IAM Access Analyser to identify and remove unused permissions, and review scope quarterly.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_credentials_minimal_scope"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-035 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-035: AI agent credentials must be scoped to minimum required permissions."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_credentials_minimal_scope(Check):
    """Verify that agent credentials follow the principle of least privilege.

    An agent whose credentials are not marked as scoped to the minimum
    required permissions fails, since over-privileged credentials amplify
    the impact of a compromise.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            scoped = resource.credentials.credentials_scoped
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            finding.status = "PASS" if scoped else "FAIL"
            if scoped:
                finding.status_extended = (
                    f"Agent {resource.name} credentials are scoped to the minimum "
                    "required permissions."
                )
            else:
                finding.status_extended = (
                    f"Agent {resource.name} credentials have excessive scope "
                    "— should be restricted to minimum required."
                )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_credentials_not_in_logs",
|
||||
"CheckTitle": "AI agent credentials must not appear in application logs or error messages",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that credentials, tokens, and secrets are not printed to stdout, stderr, or any structured log output by the AI agent.",
|
||||
"Risk": "Credentials in logs are collected by centralised log aggregation systems and retained for extended periods, often with broader access than the systems that hold the credentials. Log data is frequently exported to third-party SIEM platforms, widening the exposure surface significantly.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Audit application code and frameworks for logging of credential objects or HTTP headers.\n2. Implement a log sanitisation layer that redacts known secret patterns before emission.\n3. Configure structured logging libraries to exclude sensitive fields.\n4. Run a log scrubbing tool (e.g. Presidio) on log streams to detect and alert on credential patterns.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Implement a log sanitisation layer and configure structured logging to exclude credential fields. Add automated scanning of log samples to detect credential leakage.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_credentials_not_in_logs"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-027 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-027: AI agent credentials must not appear in logs."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_credentials_not_in_logs(Check):
    """Flag agents whose credentials leak into log or error output.

    Credentials captured by log aggregation pipelines become visible to a
    far wider audience than the credential was issued for, so any detected
    leakage is reported as FAIL.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            leaking = resource.credentials.credentials_in_logs
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            finding.status = "FAIL" if leaking else "PASS"
            finding.status_extended = (
                f"Agent {resource.name} credentials are leaking into logs or error messages."
                if leaking
                else f"Agent {resource.name} credentials do not appear in logs or error messages."
            )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_credentials_per_environment",
|
||||
"CheckTitle": "AI agent must use separate credentials for each deployment environment",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that the AI agent uses distinct, environment-specific credentials for development, staging, and production deployments rather than sharing credentials across environments.",
|
||||
"Risk": "Shared credentials across environments mean that a compromise in a less-controlled development or staging environment directly exposes production resources. It also prevents environment-specific access controls and makes revocation of compromised credentials disruptive to all environments simultaneously.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Create separate IAM roles, service accounts, or managed identities for each environment.\n2. Store environment-specific secrets under separate paths in your secrets manager.\n3. Update CI/CD pipelines to inject the correct environment credentials at deploy time.\n4. Audit existing credentials and revoke any that cross environment boundaries.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Create environment-specific credentials for dev, staging, and production. Use separate secrets manager paths or accounts per environment and enforce environment isolation in your CI/CD pipeline.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_credentials_per_environment"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-036 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,38 @@
|
||||
"""ASPM-036: AI agent must use separate credentials per environment."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_credentials_per_environment(Check):
    """Verify that agents hold distinct credentials per deployment environment.

    Sharing one credential across dev, staging, and production lets a
    compromise in a lower environment reach production, so shared
    credentials are reported as FAIL.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            if resource.credentials.credentials_per_environment:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {resource.name} uses separate credentials per environment."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {resource.name} shares credentials across environments "
                    "— a dev credential compromise could affect prod."
                )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_database_uses_proxy",
|
||||
"CheckTitle": "AI agent database connections must route through a managed proxy",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that the AI agent uses a managed database proxy (RDS Proxy, Cloud SQL Auth Proxy, Azure AD-integrated proxy) for all database connections rather than connecting directly with static credentials.",
|
||||
"Risk": "Direct database connections require the agent to hold long-lived database credentials. If the agent is compromised, the attacker gains persistent database access. Managed proxies use IAM-based token authentication, eliminating static database passwords from the agent.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Enable RDS Proxy (AWS), Cloud SQL Auth Proxy (GCP), or the appropriate managed proxy for your cloud.\n2. Configure the agent's IAM role with rds-db:connect permission for the proxy endpoint.\n3. Update the agent's database connection string to target the proxy endpoint.\n4. Remove static database credentials from the agent's configuration.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Deploy a managed database proxy and configure the agent to authenticate via IAM, eliminating the need for static database credentials in the application.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_database_uses_proxy"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-032 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
"""ASPM-032: AI agent database connections must use a managed proxy."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_database_uses_proxy(Check):
    """Verify that agents reach databases through a managed proxy.

    Managed proxies (RDS Proxy, Cloud SQL Proxy) broker connections with
    IAM-based authentication, removing static database passwords from the
    agent; direct connections are reported as FAIL.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            proxied = resource.credentials.database_uses_proxy
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            finding.status = "PASS" if proxied else "FAIL"
            if proxied:
                finding.status_extended = (
                    f"Agent {resource.name} uses a managed database proxy for "
                    "credential-less database access."
                )
            else:
                finding.status_extended = (
                    f"Agent {resource.name} connects to databases without a managed proxy "
                    "— static credentials are in use."
                )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_no_api_keys_in_vcs",
|
||||
"CheckTitle": "AI agent must not have API keys or tokens committed to version control",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that no API keys, access tokens, or secrets have been committed to the version control history of the AI agent's repositories.",
|
||||
"Risk": "Secrets committed to version control are permanent: even after deletion they remain in the git history and can be recovered. Automated scanners (GitGuardian, TruffleHog) continuously index both public and private repositories for such secrets.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Run Trufflehog or Gitleaks against the full repository history to identify exposed secrets.\n2. Revoke and rotate every discovered credential immediately.\n3. Use git-filter-repo to remove secrets from history, then force-push.\n4. Add .gitignore rules for secrets files and enable pre-commit secret scanning hooks.\n5. Configure your VCS provider's built-in secret scanning alerts.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Enable secret scanning on all repositories. Rotate any discovered credentials immediately. Add pre-commit hooks and CI gates to prevent future secret commits.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_no_api_keys_in_vcs"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-029 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-029: AI agent must not have API keys or tokens committed to version control."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_no_api_keys_in_vcs(Check):
    """Flag agents with API keys or tokens present in version control history.

    Secrets in VCS persist in repository history even after deletion and
    are routinely harvested by automated scanners, so any detected key is
    reported as FAIL.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            exposed = resource.credentials.api_key_in_vcs
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            finding.status = "FAIL" if exposed else "PASS"
            finding.status_extended = (
                f"Agent {resource.name} has API keys or tokens found in version control history."
                if exposed
                else f"Agent {resource.name} has no API keys or tokens committed to version control."
            )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_no_hardcoded_credentials",
|
||||
"CheckTitle": "AI agent must not contain hardcoded credentials in code, IaC, or manifests",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that no hardcoded secrets, passwords, tokens, or API keys are embedded directly in application source code, deployment manifests, or Infrastructure-as-Code templates for each AI agent.",
|
||||
"Risk": "Hardcoded credentials are trivially extracted from source repositories, container images, and build artefacts. They cannot be rotated without a code change and deployment, dramatically increasing the window of exposure after discovery. Automated secret-scanning tools routinely find such credentials within minutes of a repository being made public.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Identify all hardcoded secrets using a secret scanner (e.g. Trufflehog, Gitleaks).\n2. Remove secrets from source code and rotate all exposed credentials immediately.\n3. Store secrets in a cloud secrets manager (AWS Secrets Manager, HashiCorp Vault, Azure Key Vault).\n4. Reference secrets at runtime using the secrets manager SDK or environment injection.\n5. Add pre-commit hooks and CI pipeline gates to prevent future secret commits.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Replace all hardcoded credentials with dynamic references to a centralised secrets manager. Enable secret scanning in your VCS provider and CI/CD pipeline to catch regressions automatically.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_no_hardcoded_credentials"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-026 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-026: AI agent must not have hardcoded credentials."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_no_hardcoded_credentials(Check):
    """Flag agents with hardcoded secrets in code, IaC, or deployment manifests.

    Hardcoded credentials cannot be rotated without a code change and are
    easily extracted from repositories and build artefacts, so any detected
    secret is reported as FAIL.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            if resource.credentials.has_hardcoded_secrets:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {resource.name} has hardcoded secrets detected in code, "
                    "deployment manifests, or IaC."
                )
            else:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {resource.name} has no hardcoded credentials in code, "
                    "IaC, or manifests."
                )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_no_secrets_in_iac",
|
||||
"CheckTitle": "AI agent IaC templates must not contain embedded secrets",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that secrets are not embedded as plaintext values inside Terraform, CloudFormation, Pulumi, or other IaC templates used to deploy the AI agent.",
|
||||
"Risk": "Secrets in IaC templates are stored in Terraform state files, CloudFormation stacks, and VCS history — all of which have broader access than the runtime environment. Terraform state files are often stored in shared S3 buckets without field-level encryption.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Replace plaintext secret values in IaC with data source lookups from your secrets manager.\n2. For Terraform: use the aws_secretsmanager_secret_version data source.\n3. For CloudFormation: use SSM Parameter Store SecureString or Secrets Manager dynamic references.\n4. Rotate any secrets that were previously embedded in IaC.\n5. Add IaC secret scanning (Checkov, tfsec) to your CI pipeline.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Use secrets manager data sources in IaC to inject secrets at deploy time rather than embedding plaintext values. Enable Checkov or tfsec in CI to catch violations automatically.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_no_secrets_in_iac"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-031 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-031: AI agent IaC templates must not embed secrets."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_no_secrets_in_iac(Check):
    """Flag agents whose IaC templates embed plaintext secrets.

    Secrets in Terraform or CloudFormation end up in state files and source
    control with broad access; they should be replaced by dynamic secrets
    manager references, so embedded secrets are reported as FAIL.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            embedded = resource.credentials.secrets_in_iac
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            finding.status = "FAIL" if embedded else "PASS"
            if embedded:
                finding.status_extended = (
                    f"Agent {resource.name} has secrets embedded in IaC "
                    "— these should be replaced with secrets manager references."
                )
            else:
                finding.status_extended = (
                    f"Agent {resource.name} has no secrets embedded in Terraform or "
                    "CloudFormation templates."
                )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_third_party_keys_managed",
|
||||
"CheckTitle": "AI agent third-party API keys must be stored in a secrets manager with rotation",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that all third-party API keys (Slack, GitHub, OpenAI, payment processors, etc.) used by the AI agent are stored in and retrieved from a centralised secrets manager.",
|
||||
"Risk": "Third-party API keys grant access to external services and are frequently treated as second-class credentials. Unmanaged third-party keys may be stored in plaintext config files or shared via insecure channels, and often lack rotation schedules, leading to long-lived exposure.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Inventory all third-party API keys used by the agent.\n2. Import each key into your secrets manager.\n3. Configure rotation: either automatic (where the third party supports it) or manual with alerting.\n4. Update the agent to retrieve third-party keys at runtime from the secrets manager.\n5. Revoke any keys that were stored in plaintext and issue replacements.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Store all third-party API keys in a secrets manager. Configure alerts for keys approaching their rotation deadline and document a manual rotation procedure for services that do not support automatic rotation.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_third_party_keys_managed"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-033 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
"""ASPM-033: AI agent third-party API keys must be managed in a secrets manager."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_third_party_keys_managed(Check):
    """Verify that third-party API keys live in a secrets manager with rotation.

    Keys for external services (Slack, GitHub, OpenAI, …) carry the same
    risk as cloud credentials; unmanaged keys are reported as FAIL.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Return one CheckReportASPM finding per agent loaded by the client."""
        results: list[CheckReportASPM] = []
        for resource in credentials_client.agents:
            managed = resource.credentials.third_party_keys_managed
            finding = CheckReportASPM(metadata=self.metadata(), resource=resource)
            finding.status = "PASS" if managed else "FAIL"
            if managed:
                finding.status_extended = (
                    f"Agent {resource.name} third-party API keys are stored in a "
                    "secrets manager with rotation."
                )
            else:
                finding.status_extended = (
                    f"Agent {resource.name} third-party API keys are not managed "
                    "through a secrets manager."
                )
            results.append(finding)
        return results
|
||||
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Provider": "aspm",
|
||||
"CheckID": "aspm_agent_uses_secrets_manager",
|
||||
"CheckTitle": "AI agent must retrieve all credentials from a cloud secrets manager",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AI Agent Security Best Practices"
|
||||
],
|
||||
"ServiceName": "credentials",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "aspm-agent-{id}",
|
||||
"Severity": "critical",
|
||||
"ResourceType": "AiAgent",
|
||||
"Description": "**AI Agent Credentials**. Verifies that the AI agent retrieves all credentials at runtime from a centralised secrets manager rather than from environment variables, configuration files, or hardcoded values.",
|
||||
"Risk": "Without a secrets manager, credentials are typically stored in environment variables or config files that are less protected, harder to audit, and cannot be automatically rotated. This significantly increases the likelihood and impact of credential compromise.",
|
||||
"RelatedUrl": "https://owasp.org/www-project-top-10-for-large-language-model-applications/",
|
||||
"AdditionalURLs": [],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Choose a secrets manager appropriate for your cloud: AWS Secrets Manager, HashiCorp Vault, Azure Key Vault, or GCP Secret Manager.\n2. Migrate all agent credentials into the secrets manager.\n3. Update the agent to retrieve credentials via the secrets manager SDK or sidecar injection.\n4. Remove all environment variable and config-file credential references.\n5. Enable automatic rotation for all managed secrets.",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Adopt a centralised secrets manager for all agent credentials. Use IAM-based authentication to access the secrets manager rather than static access keys.",
|
||||
"Url": "https://hub.prowler.com/check/aspm_agent_uses_secrets_manager"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"secrets"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "ASPM-028 check from the Prowler ASPM check suite."
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
"""ASPM-028: AI agent must retrieve credentials from a cloud secrets manager."""
|
||||
|
||||
from prowler.lib.check.models import Check, CheckReportASPM
|
||||
from prowler.providers.aspm.services.credentials.credentials_client import (
|
||||
credentials_client,
|
||||
)
|
||||
|
||||
|
||||
class aspm_agent_uses_secrets_manager(Check):
    """Verify that every AI agent retrieves its credentials from a cloud secrets manager.

    A centralised secrets manager (AWS Secrets Manager, HashiCorp Vault,
    Azure Key Vault, GCP Secret Manager) removes static credential storage,
    supports automatic rotation, and provides a single audit point for
    credential access.
    """

    def execute(self) -> list[CheckReportASPM]:
        """Run the check over every loaded agent.

        Returns:
            One CheckReportASPM finding per agent.
        """
        reports: list[CheckReportASPM] = []
        for current_agent in credentials_client.agents:
            finding = CheckReportASPM(
                metadata=self.metadata(), resource=current_agent
            )
            # Positive branch first: secrets-manager usage is the desired state.
            if current_agent.credentials.uses_secrets_manager:
                finding.status = "PASS"
                finding.status_extended = (
                    f"Agent {current_agent.name} retrieves all credentials from a cloud "
                    "secrets manager."
                )
            else:
                finding.status = "FAIL"
                finding.status_extended = (
                    f"Agent {current_agent.name} does not use a cloud secrets manager "
                    "— credentials may be stored insecurely."
                )
            reports.append(finding)
        return reports
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user